// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Ingenic SoC CGU driver
 *
 * Copyright (c) 2013-2015 Imagination Technologies
 * Author: Paul Burton <paul.burton@mips.com>
 */
9 #include <linux/bitops.h>
10 #include <linux/clk.h>
11 #include <linux/clk-provider.h>
12 #include <linux/clkdev.h>
13 #include <linux/delay.h>
15 #include <linux/iopoll.h>
16 #include <linux/math64.h>
18 #include <linux/of_address.h>
19 #include <linux/slab.h>
20 #include <linux/spinlock.h>
21 #include <linux/time.h>
25 #define MHZ (1000 * 1000)
27 static inline const struct ingenic_cgu_clk_info
*
28 to_clk_info(struct ingenic_clk
*clk
)
30 return &clk
->cgu
->clock_info
[clk
->idx
];
34 * ingenic_cgu_gate_get() - get the value of clock gate register bit
35 * @cgu: reference to the CGU whose registers should be read
36 * @info: info struct describing the gate bit
38 * Retrieves the state of the clock gate bit described by info. The
39 * caller must hold cgu->lock.
41 * Return: true if the gate bit is set, else false.
44 ingenic_cgu_gate_get(struct ingenic_cgu
*cgu
,
45 const struct ingenic_cgu_gate_info
*info
)
47 return !!(readl(cgu
->base
+ info
->reg
) & BIT(info
->bit
))
48 ^ info
->clear_to_gate
;
52 * ingenic_cgu_gate_set() - set the value of clock gate register bit
53 * @cgu: reference to the CGU whose registers should be modified
54 * @info: info struct describing the gate bit
55 * @val: non-zero to gate a clock, otherwise zero
57 * Sets the given gate bit in order to gate or ungate a clock.
59 * The caller must hold cgu->lock.
62 ingenic_cgu_gate_set(struct ingenic_cgu
*cgu
,
63 const struct ingenic_cgu_gate_info
*info
, bool val
)
65 u32 clkgr
= readl(cgu
->base
+ info
->reg
);
67 if (val
^ info
->clear_to_gate
)
68 clkgr
|= BIT(info
->bit
);
70 clkgr
&= ~BIT(info
->bit
);
72 writel(clkgr
, cgu
->base
+ info
->reg
);
80 ingenic_pll_recalc_rate(struct clk_hw
*hw
, unsigned long parent_rate
)
82 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
83 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
84 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
85 const struct ingenic_cgu_pll_info
*pll_info
;
86 unsigned m
, n
, od_enc
, od
;
90 BUG_ON(clk_info
->type
!= CGU_CLK_PLL
);
91 pll_info
= &clk_info
->pll
;
93 ctl
= readl(cgu
->base
+ pll_info
->reg
);
95 m
= (ctl
>> pll_info
->m_shift
) & GENMASK(pll_info
->m_bits
- 1, 0);
96 m
+= pll_info
->m_offset
;
97 n
= (ctl
>> pll_info
->n_shift
) & GENMASK(pll_info
->n_bits
- 1, 0);
98 n
+= pll_info
->n_offset
;
99 od_enc
= ctl
>> pll_info
->od_shift
;
100 od_enc
&= GENMASK(pll_info
->od_bits
- 1, 0);
102 ctl
= readl(cgu
->base
+ pll_info
->bypass_reg
);
104 bypass
= !pll_info
->no_bypass_bit
&&
105 !!(ctl
& BIT(pll_info
->bypass_bit
));
110 for (od
= 0; od
< pll_info
->od_max
; od
++) {
111 if (pll_info
->od_encoding
[od
] == od_enc
)
114 BUG_ON(od
== pll_info
->od_max
);
117 return div_u64((u64
)parent_rate
* m
* pll_info
->rate_multiplier
,
122 ingenic_pll_calc(const struct ingenic_cgu_clk_info
*clk_info
,
123 unsigned long rate
, unsigned long parent_rate
,
124 unsigned *pm
, unsigned *pn
, unsigned *pod
)
126 const struct ingenic_cgu_pll_info
*pll_info
;
129 pll_info
= &clk_info
->pll
;
133 * The frequency after the input divider must be between 10 and 50 MHz.
134 * The highest divider yields the best resolution.
136 n
= parent_rate
/ (10 * MHZ
);
137 n
= min_t(unsigned, n
, 1 << clk_info
->pll
.n_bits
);
138 n
= max_t(unsigned, n
, pll_info
->n_offset
);
140 m
= (rate
/ MHZ
) * od
* n
/ (parent_rate
/ MHZ
);
141 m
= min_t(unsigned, m
, 1 << clk_info
->pll
.m_bits
);
142 m
= max_t(unsigned, m
, pll_info
->m_offset
);
151 return div_u64((u64
)parent_rate
* m
* pll_info
->rate_multiplier
,
156 ingenic_pll_round_rate(struct clk_hw
*hw
, unsigned long req_rate
,
157 unsigned long *prate
)
159 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
160 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
162 return ingenic_pll_calc(clk_info
, req_rate
, *prate
, NULL
, NULL
, NULL
);
165 static inline int ingenic_pll_check_stable(struct ingenic_cgu
*cgu
,
166 const struct ingenic_cgu_pll_info
*pll_info
)
170 return readl_poll_timeout(cgu
->base
+ pll_info
->reg
, ctl
,
171 ctl
& BIT(pll_info
->stable_bit
),
172 0, 100 * USEC_PER_MSEC
);
176 ingenic_pll_set_rate(struct clk_hw
*hw
, unsigned long req_rate
,
177 unsigned long parent_rate
)
179 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
180 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
181 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
182 const struct ingenic_cgu_pll_info
*pll_info
= &clk_info
->pll
;
183 unsigned long rate
, flags
;
184 unsigned int m
, n
, od
;
188 rate
= ingenic_pll_calc(clk_info
, req_rate
, parent_rate
,
190 if (rate
!= req_rate
)
191 pr_info("ingenic-cgu: request '%s' rate %luHz, actual %luHz\n",
192 clk_info
->name
, req_rate
, rate
);
194 spin_lock_irqsave(&cgu
->lock
, flags
);
195 ctl
= readl(cgu
->base
+ pll_info
->reg
);
197 ctl
&= ~(GENMASK(pll_info
->m_bits
- 1, 0) << pll_info
->m_shift
);
198 ctl
|= (m
- pll_info
->m_offset
) << pll_info
->m_shift
;
200 ctl
&= ~(GENMASK(pll_info
->n_bits
- 1, 0) << pll_info
->n_shift
);
201 ctl
|= (n
- pll_info
->n_offset
) << pll_info
->n_shift
;
203 ctl
&= ~(GENMASK(pll_info
->od_bits
- 1, 0) << pll_info
->od_shift
);
204 ctl
|= pll_info
->od_encoding
[od
- 1] << pll_info
->od_shift
;
206 writel(ctl
, cgu
->base
+ pll_info
->reg
);
208 /* If the PLL is enabled, verify that it's stable */
209 if (ctl
& BIT(pll_info
->enable_bit
))
210 ret
= ingenic_pll_check_stable(cgu
, pll_info
);
212 spin_unlock_irqrestore(&cgu
->lock
, flags
);
217 static int ingenic_pll_enable(struct clk_hw
*hw
)
219 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
220 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
221 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
222 const struct ingenic_cgu_pll_info
*pll_info
= &clk_info
->pll
;
227 spin_lock_irqsave(&cgu
->lock
, flags
);
228 ctl
= readl(cgu
->base
+ pll_info
->bypass_reg
);
230 ctl
&= ~BIT(pll_info
->bypass_bit
);
232 writel(ctl
, cgu
->base
+ pll_info
->bypass_reg
);
234 ctl
= readl(cgu
->base
+ pll_info
->reg
);
236 ctl
|= BIT(pll_info
->enable_bit
);
238 writel(ctl
, cgu
->base
+ pll_info
->reg
);
240 ret
= ingenic_pll_check_stable(cgu
, pll_info
);
241 spin_unlock_irqrestore(&cgu
->lock
, flags
);
246 static void ingenic_pll_disable(struct clk_hw
*hw
)
248 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
249 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
250 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
251 const struct ingenic_cgu_pll_info
*pll_info
= &clk_info
->pll
;
255 spin_lock_irqsave(&cgu
->lock
, flags
);
256 ctl
= readl(cgu
->base
+ pll_info
->reg
);
258 ctl
&= ~BIT(pll_info
->enable_bit
);
260 writel(ctl
, cgu
->base
+ pll_info
->reg
);
261 spin_unlock_irqrestore(&cgu
->lock
, flags
);
264 static int ingenic_pll_is_enabled(struct clk_hw
*hw
)
266 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
267 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
268 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
269 const struct ingenic_cgu_pll_info
*pll_info
= &clk_info
->pll
;
272 ctl
= readl(cgu
->base
+ pll_info
->reg
);
274 return !!(ctl
& BIT(pll_info
->enable_bit
));
277 static const struct clk_ops ingenic_pll_ops
= {
278 .recalc_rate
= ingenic_pll_recalc_rate
,
279 .round_rate
= ingenic_pll_round_rate
,
280 .set_rate
= ingenic_pll_set_rate
,
282 .enable
= ingenic_pll_enable
,
283 .disable
= ingenic_pll_disable
,
284 .is_enabled
= ingenic_pll_is_enabled
,
288 * Operations for all non-PLL clocks
291 static u8
ingenic_clk_get_parent(struct clk_hw
*hw
)
293 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
294 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
295 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
297 u8 i
, hw_idx
, idx
= 0;
299 if (clk_info
->type
& CGU_CLK_MUX
) {
300 reg
= readl(cgu
->base
+ clk_info
->mux
.reg
);
301 hw_idx
= (reg
>> clk_info
->mux
.shift
) &
302 GENMASK(clk_info
->mux
.bits
- 1, 0);
305 * Convert the hardware index to the parent index by skipping
306 * over any -1's in the parents array.
308 for (i
= 0; i
< hw_idx
; i
++) {
309 if (clk_info
->parents
[i
] != -1)
317 static int ingenic_clk_set_parent(struct clk_hw
*hw
, u8 idx
)
319 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
320 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
321 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
323 u8 curr_idx
, hw_idx
, num_poss
;
326 if (clk_info
->type
& CGU_CLK_MUX
) {
328 * Convert the parent index to the hardware index by adding
329 * 1 for any -1 in the parents array preceding the given
330 * index. That is, we want the index of idx'th entry in
331 * clk_info->parents which does not equal -1.
333 hw_idx
= curr_idx
= 0;
334 num_poss
= 1 << clk_info
->mux
.bits
;
335 for (; hw_idx
< num_poss
; hw_idx
++) {
336 if (clk_info
->parents
[hw_idx
] == -1)
343 /* idx should always be a valid parent */
344 BUG_ON(curr_idx
!= idx
);
346 mask
= GENMASK(clk_info
->mux
.bits
- 1, 0);
347 mask
<<= clk_info
->mux
.shift
;
349 spin_lock_irqsave(&cgu
->lock
, flags
);
351 /* write the register */
352 reg
= readl(cgu
->base
+ clk_info
->mux
.reg
);
354 reg
|= hw_idx
<< clk_info
->mux
.shift
;
355 writel(reg
, cgu
->base
+ clk_info
->mux
.reg
);
357 spin_unlock_irqrestore(&cgu
->lock
, flags
);
361 return idx
? -EINVAL
: 0;
365 ingenic_clk_recalc_rate(struct clk_hw
*hw
, unsigned long parent_rate
)
367 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
368 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
369 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
370 unsigned long rate
= parent_rate
;
373 if (clk_info
->type
& CGU_CLK_DIV
) {
374 div_reg
= readl(cgu
->base
+ clk_info
->div
.reg
);
375 div
= (div_reg
>> clk_info
->div
.shift
) &
376 GENMASK(clk_info
->div
.bits
- 1, 0);
378 if (clk_info
->div
.div_table
)
379 div
= clk_info
->div
.div_table
[div
];
381 div
= (div
+ 1) * clk_info
->div
.div
;
384 } else if (clk_info
->type
& CGU_CLK_FIXDIV
) {
385 rate
/= clk_info
->fixdiv
.div
;
392 ingenic_clk_calc_hw_div(const struct ingenic_cgu_clk_info
*clk_info
,
395 unsigned int i
, best_i
= 0, best
= (unsigned int)-1;
397 for (i
= 0; i
< (1 << clk_info
->div
.bits
)
398 && clk_info
->div
.div_table
[i
]; i
++) {
399 if (clk_info
->div
.div_table
[i
] >= div
&&
400 clk_info
->div
.div_table
[i
] < best
) {
401 best
= clk_info
->div
.div_table
[i
];
413 ingenic_clk_calc_div(const struct ingenic_cgu_clk_info
*clk_info
,
414 unsigned long parent_rate
, unsigned long req_rate
)
416 unsigned int div
, hw_div
;
418 /* calculate the divide */
419 div
= DIV_ROUND_UP(parent_rate
, req_rate
);
421 if (clk_info
->div
.div_table
) {
422 hw_div
= ingenic_clk_calc_hw_div(clk_info
, div
);
424 return clk_info
->div
.div_table
[hw_div
];
427 /* Impose hardware constraints */
428 div
= min_t(unsigned, div
, 1 << clk_info
->div
.bits
);
429 div
= max_t(unsigned, div
, 1);
432 * If the divider value itself must be divided before being written to
433 * the divider register, we must ensure we don't have any bits set that
434 * would be lost as a result of doing so.
436 div
/= clk_info
->div
.div
;
437 div
*= clk_info
->div
.div
;
443 ingenic_clk_round_rate(struct clk_hw
*hw
, unsigned long req_rate
,
444 unsigned long *parent_rate
)
446 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
447 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
448 unsigned int div
= 1;
450 if (clk_info
->type
& CGU_CLK_DIV
)
451 div
= ingenic_clk_calc_div(clk_info
, *parent_rate
, req_rate
);
452 else if (clk_info
->type
& CGU_CLK_FIXDIV
)
453 div
= clk_info
->fixdiv
.div
;
454 else if (clk_hw_can_set_rate_parent(hw
))
455 *parent_rate
= req_rate
;
457 return DIV_ROUND_UP(*parent_rate
, div
);
460 static inline int ingenic_clk_check_stable(struct ingenic_cgu
*cgu
,
461 const struct ingenic_cgu_clk_info
*clk_info
)
465 return readl_poll_timeout(cgu
->base
+ clk_info
->div
.reg
, reg
,
466 !(reg
& BIT(clk_info
->div
.busy_bit
)),
467 0, 100 * USEC_PER_MSEC
);
471 ingenic_clk_set_rate(struct clk_hw
*hw
, unsigned long req_rate
,
472 unsigned long parent_rate
)
474 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
475 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
476 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
477 unsigned long rate
, flags
;
478 unsigned int hw_div
, div
;
482 if (clk_info
->type
& CGU_CLK_DIV
) {
483 div
= ingenic_clk_calc_div(clk_info
, parent_rate
, req_rate
);
484 rate
= DIV_ROUND_UP(parent_rate
, div
);
486 if (rate
!= req_rate
)
489 if (clk_info
->div
.div_table
)
490 hw_div
= ingenic_clk_calc_hw_div(clk_info
, div
);
492 hw_div
= ((div
/ clk_info
->div
.div
) - 1);
494 spin_lock_irqsave(&cgu
->lock
, flags
);
495 reg
= readl(cgu
->base
+ clk_info
->div
.reg
);
497 /* update the divide */
498 mask
= GENMASK(clk_info
->div
.bits
- 1, 0);
499 reg
&= ~(mask
<< clk_info
->div
.shift
);
500 reg
|= hw_div
<< clk_info
->div
.shift
;
502 /* clear the stop bit */
503 if (clk_info
->div
.stop_bit
!= -1)
504 reg
&= ~BIT(clk_info
->div
.stop_bit
);
506 /* set the change enable bit */
507 if (clk_info
->div
.ce_bit
!= -1)
508 reg
|= BIT(clk_info
->div
.ce_bit
);
510 /* update the hardware */
511 writel(reg
, cgu
->base
+ clk_info
->div
.reg
);
513 /* wait for the change to take effect */
514 if (clk_info
->div
.busy_bit
!= -1)
515 ret
= ingenic_clk_check_stable(cgu
, clk_info
);
517 spin_unlock_irqrestore(&cgu
->lock
, flags
);
524 static int ingenic_clk_enable(struct clk_hw
*hw
)
526 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
527 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
528 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
531 if (clk_info
->type
& CGU_CLK_GATE
) {
532 /* ungate the clock */
533 spin_lock_irqsave(&cgu
->lock
, flags
);
534 ingenic_cgu_gate_set(cgu
, &clk_info
->gate
, false);
535 spin_unlock_irqrestore(&cgu
->lock
, flags
);
537 if (clk_info
->gate
.delay_us
)
538 udelay(clk_info
->gate
.delay_us
);
544 static void ingenic_clk_disable(struct clk_hw
*hw
)
546 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
547 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
548 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
551 if (clk_info
->type
& CGU_CLK_GATE
) {
553 spin_lock_irqsave(&cgu
->lock
, flags
);
554 ingenic_cgu_gate_set(cgu
, &clk_info
->gate
, true);
555 spin_unlock_irqrestore(&cgu
->lock
, flags
);
559 static int ingenic_clk_is_enabled(struct clk_hw
*hw
)
561 struct ingenic_clk
*ingenic_clk
= to_ingenic_clk(hw
);
562 const struct ingenic_cgu_clk_info
*clk_info
= to_clk_info(ingenic_clk
);
563 struct ingenic_cgu
*cgu
= ingenic_clk
->cgu
;
566 if (clk_info
->type
& CGU_CLK_GATE
)
567 enabled
= !ingenic_cgu_gate_get(cgu
, &clk_info
->gate
);
572 static const struct clk_ops ingenic_clk_ops
= {
573 .get_parent
= ingenic_clk_get_parent
,
574 .set_parent
= ingenic_clk_set_parent
,
576 .recalc_rate
= ingenic_clk_recalc_rate
,
577 .round_rate
= ingenic_clk_round_rate
,
578 .set_rate
= ingenic_clk_set_rate
,
580 .enable
= ingenic_clk_enable
,
581 .disable
= ingenic_clk_disable
,
582 .is_enabled
= ingenic_clk_is_enabled
,
589 static int ingenic_register_clock(struct ingenic_cgu
*cgu
, unsigned idx
)
591 const struct ingenic_cgu_clk_info
*clk_info
= &cgu
->clock_info
[idx
];
592 struct clk_init_data clk_init
;
593 struct ingenic_clk
*ingenic_clk
= NULL
;
594 struct clk
*clk
, *parent
;
595 const char *parent_names
[4];
596 unsigned caps
, i
, num_possible
;
599 BUILD_BUG_ON(ARRAY_SIZE(clk_info
->parents
) > ARRAY_SIZE(parent_names
));
601 if (clk_info
->type
== CGU_CLK_EXT
) {
602 clk
= of_clk_get_by_name(cgu
->np
, clk_info
->name
);
604 pr_err("%s: no external clock '%s' provided\n",
605 __func__
, clk_info
->name
);
609 err
= clk_register_clkdev(clk
, clk_info
->name
, NULL
);
614 cgu
->clocks
.clks
[idx
] = clk
;
618 if (!clk_info
->type
) {
619 pr_err("%s: no clock type specified for '%s'\n", __func__
,
624 ingenic_clk
= kzalloc(sizeof(*ingenic_clk
), GFP_KERNEL
);
630 ingenic_clk
->hw
.init
= &clk_init
;
631 ingenic_clk
->cgu
= cgu
;
632 ingenic_clk
->idx
= idx
;
634 clk_init
.name
= clk_info
->name
;
636 clk_init
.parent_names
= parent_names
;
638 caps
= clk_info
->type
;
640 if (caps
& CGU_CLK_DIV
) {
641 caps
&= ~CGU_CLK_DIV
;
642 } else if (!(caps
& CGU_CLK_CUSTOM
)) {
643 /* pass rate changes to the parent clock */
644 clk_init
.flags
|= CLK_SET_RATE_PARENT
;
647 if (caps
& (CGU_CLK_MUX
| CGU_CLK_CUSTOM
)) {
648 clk_init
.num_parents
= 0;
650 if (caps
& CGU_CLK_MUX
)
651 num_possible
= 1 << clk_info
->mux
.bits
;
653 num_possible
= ARRAY_SIZE(clk_info
->parents
);
655 for (i
= 0; i
< num_possible
; i
++) {
656 if (clk_info
->parents
[i
] == -1)
659 parent
= cgu
->clocks
.clks
[clk_info
->parents
[i
]];
660 parent_names
[clk_init
.num_parents
] =
661 __clk_get_name(parent
);
662 clk_init
.num_parents
++;
665 BUG_ON(!clk_init
.num_parents
);
666 BUG_ON(clk_init
.num_parents
> ARRAY_SIZE(parent_names
));
668 BUG_ON(clk_info
->parents
[0] == -1);
669 clk_init
.num_parents
= 1;
670 parent
= cgu
->clocks
.clks
[clk_info
->parents
[0]];
671 parent_names
[0] = __clk_get_name(parent
);
674 if (caps
& CGU_CLK_CUSTOM
) {
675 clk_init
.ops
= clk_info
->custom
.clk_ops
;
677 caps
&= ~CGU_CLK_CUSTOM
;
680 pr_err("%s: custom clock may not be combined with type 0x%x\n",
684 } else if (caps
& CGU_CLK_PLL
) {
685 clk_init
.ops
= &ingenic_pll_ops
;
687 caps
&= ~CGU_CLK_PLL
;
690 pr_err("%s: PLL may not be combined with type 0x%x\n",
695 clk_init
.ops
= &ingenic_clk_ops
;
698 /* nothing to do for gates or fixed dividers */
699 caps
&= ~(CGU_CLK_GATE
| CGU_CLK_FIXDIV
);
701 if (caps
& CGU_CLK_MUX
) {
702 if (!(caps
& CGU_CLK_MUX_GLITCHFREE
))
703 clk_init
.flags
|= CLK_SET_PARENT_GATE
;
705 caps
&= ~(CGU_CLK_MUX
| CGU_CLK_MUX_GLITCHFREE
);
709 pr_err("%s: unknown clock type 0x%x\n", __func__
, caps
);
713 clk
= clk_register(NULL
, &ingenic_clk
->hw
);
715 pr_err("%s: failed to register clock '%s'\n", __func__
,
721 err
= clk_register_clkdev(clk
, clk_info
->name
, NULL
);
725 cgu
->clocks
.clks
[idx
] = clk
;
733 ingenic_cgu_new(const struct ingenic_cgu_clk_info
*clock_info
,
734 unsigned num_clocks
, struct device_node
*np
)
736 struct ingenic_cgu
*cgu
;
738 cgu
= kzalloc(sizeof(*cgu
), GFP_KERNEL
);
742 cgu
->base
= of_iomap(np
, 0);
744 pr_err("%s: failed to map CGU registers\n", __func__
);
749 cgu
->clock_info
= clock_info
;
750 cgu
->clocks
.clk_num
= num_clocks
;
752 spin_lock_init(&cgu
->lock
);
762 int ingenic_cgu_register_clocks(struct ingenic_cgu
*cgu
)
767 cgu
->clocks
.clks
= kcalloc(cgu
->clocks
.clk_num
, sizeof(struct clk
*),
769 if (!cgu
->clocks
.clks
) {
774 for (i
= 0; i
< cgu
->clocks
.clk_num
; i
++) {
775 err
= ingenic_register_clock(cgu
, i
);
777 goto err_out_unregister
;
780 err
= of_clk_add_provider(cgu
->np
, of_clk_src_onecell_get
,
783 goto err_out_unregister
;
788 for (i
= 0; i
< cgu
->clocks
.clk_num
; i
++) {
789 if (!cgu
->clocks
.clks
[i
])
791 if (cgu
->clock_info
[i
].type
& CGU_CLK_EXT
)
792 clk_put(cgu
->clocks
.clks
[i
]);
794 clk_unregister(cgu
->clocks
.clks
[i
]);
796 kfree(cgu
->clocks
.clks
);