// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))

#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)

#define GET_MOD_CLK_ID(base, index, bit)	\
			((base) + ((((index) * (16))) + (bit)))

#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @hw: handle between common and hardware-specific interfaces
 * @on_index: register offset
 * @on_bit: ON/MON bit
 * @mon_index: monitor register offset
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clock
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)

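/*
 * The PLL output recalculated below is assumed to follow
 *
 *   Fout = Fparent * (MDIV + KDIV / 65536) / (PDIV * 2^SDIV)
 *
 * which is what the fixed-point math implements: (MDIV << 16) + KDIV is the
 * multiplier in 1/65536 units (KDIV is a signed fraction), the product is
 * shifted right by 16 + SDIV, and the result is then divided by PDIV.
 */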
static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	unsigned int clk1, clk2;
	u64 rate;

	if (!PLL_CLK_ACCESS(pll_clk->conf))
		return 0;

	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));

	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
			       16 + SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->cfg.conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}

static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}

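/*
 * A divider update below sets the per-field write-enable bit
 * (DDIV_DIVCTL_WEN) in the upper half of the divider register, and is
 * bracketed by waits for the corresponding CPG_CLKSTATUS0 monitor bit to
 * clear, so a new value is only written once any previous update has
 * completed and the caller only returns once this one has taken effect.
 */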
static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}

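/*
 * Clock specifiers handed to this provider use two cells, following the
 * CPG/MSSR convention from <dt-bindings/clock/renesas-cpg-mssr.h>: cell 0
 * selects the clock type (CPG_CORE or CPG_MOD) and cell 1 the index within
 * that group.
 */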
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

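/*
 * The CLK_ON registers appear to pair each control bit with a write-enable
 * bit 16 positions above it: writing (bitmask << 16) on its own gates the
 * clock off, while (bitmask << 16) | bitmask gates it on, so other bits in
 * the register are left alone without a read-modify-write sequence.
 */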
static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		enable ? "ON" : "OFF");

	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + reg);

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + reg);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}

static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask;
	u32 offset;

	if (clock->mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(clock->mon_index);
		bitmask = BIT(clock->mon_bit);
	} else {
		offset = GET_CLK_ON_OFFSET(clock->on_index);
		bitmask = BIT(clock->on_bit);
	}

	return readl(priv->base + offset) & bitmask;
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}

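/*
 * The RST registers follow the same write-enable layout as the CLK_ON
 * registers above (bit n + 16 enables a write to bit n). The separate
 * RST_MON registers report the current reset state, with the monitor bit
 * set while the line is held in reset, hence the opposite polling
 * conditions in assert and deassert below.
 */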
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};

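/*
 * DT reset specifiers are flat identifiers, index * 16 + bit of the RST
 * register field. of_xlate maps such an identifier back to the matching
 * entry in the SoC's resets[] table, so only resets listed there can be
 * requested.
 */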
static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};

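/*
 * The CPG exposes a single, always-on PM domain. attach_dev() walks the
 * consumer's "clocks" phandles and registers each referenced clock with the
 * PM clk framework, so module clocks are gated and ungated by runtime PM.
 */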
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}
		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}

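/*
 * Probe order below: every clks[] slot starts out as ERR_PTR(-ENOENT) so
 * that unregistered clocks fail cleanly, core clocks are registered before
 * module clocks (module clocks take core clocks as parents), and only then
 * are the clock provider, PM domain and reset controller made visible to
 * consumers.
 */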
static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
MODULE_LICENSE("GPL");