// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/V2H(P) Clock Pulse Generator
 *
 * Copyright (C) 2024 Renesas Electronics Corp.
 *
 * Based on rzg2l-cpg.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/iopoll.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_clock.h>
#include <linux/pm_domain.h>
#include <linux/reset-controller.h>

#include <dt-bindings/clock/renesas-cpg-mssr.h>

#include "rzv2h-cpg.h"

#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif

#define GET_CLK_ON_OFFSET(x)	(0x600 + ((x) * 4))
#define GET_CLK_MON_OFFSET(x)	(0x800 + ((x) * 4))
#define GET_RST_OFFSET(x)	(0x900 + ((x) * 4))
#define GET_RST_MON_OFFSET(x)	(0xA00 + ((x) * 4))

#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), (val)))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), (val))
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), (val))
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), (val))
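
/*
 * The upper 16 bits of each DDIV register are write-enable bits: a
 * divider field at @shift only accepts a new value when bit
 * (@shift + 16) is set in the same write.
 */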
#define DDIV_DIVCTL_WEN(shift)	BIT((shift) + 16)
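
/*
 * Each CLK_ON register controls up to 16 module clocks, so a module
 * clock ID is the number of core clocks plus (register index * 16)
 * plus the bit position within the register.
 */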
#define GET_MOD_CLK_ID(base, index, bit) \
		((base) + ((index) * 16) + (bit))

#define CPG_CLKSTATUS0		(0x700)

/**
 * struct rzv2h_cpg_priv - Clock Pulse Generator Private Data
 *
 * @dev: CPG device
 * @base: CPG register block base address
 * @rmw_lock: protects register accesses
 * @clks: Array containing all Core and Module Clocks
 * @num_core_clks: Number of Core Clocks in clks[]
 * @num_mod_clks: Number of Module Clocks in clks[]
 * @resets: Array of resets
 * @num_resets: Number of Module Resets in info->resets[]
 * @last_dt_core_clk: ID of the last Core Clock exported to DT
 * @rcdev: Reset controller entity
 */
struct rzv2h_cpg_priv {
	struct device *dev;
	void __iomem *base;
	spinlock_t rmw_lock;

	struct clk **clks;
	unsigned int num_core_clks;
	unsigned int num_mod_clks;
	struct rzv2h_reset *resets;
	unsigned int num_resets;
	unsigned int last_dt_core_clk;

	struct reset_controller_dev rcdev;
};

#define rcdev_to_priv(x)	container_of(x, struct rzv2h_cpg_priv, rcdev)

struct pll_clk {
	struct rzv2h_cpg_priv *priv;
	void __iomem *base;
	struct clk_hw hw;
	unsigned int conf;
	unsigned int type;
};

#define to_pll(_hw)	container_of(_hw, struct pll_clk, hw)

/**
 * struct mod_clock - Module clock
 *
 * @priv: CPG private data
 * @hw: handle between common and hardware-specific interfaces
 * @on_index: CLK_ON register index
 * @on_bit: ON/MON bit
 * @mon_index: monitor register index, or -1 if there is no monitor bit
 * @mon_bit: monitor bit
 */
struct mod_clock {
	struct rzv2h_cpg_priv *priv;
	struct clk_hw hw;
	u8 on_index;
	u8 on_bit;
	s8 mon_index;
	u8 mon_bit;
};

#define to_mod_clock(_hw)	container_of(_hw, struct mod_clock, hw)

/**
 * struct ddiv_clk - DDIV clock
 *
 * @priv: CPG private data
 * @div: divider clk
 * @mon: monitor bit in CPG_CLKSTATUS0 register
 */
struct ddiv_clk {
	struct rzv2h_cpg_priv *priv;
	struct clk_divider div;
	u8 mon;
};

#define to_ddiv_clock(_div)	container_of(_div, struct ddiv_clk, div)

static unsigned long rzv2h_cpg_pll_clk_recalc_rate(struct clk_hw *hw,
						   unsigned long parent_rate)
{
	struct pll_clk *pll_clk = to_pll(hw);
	struct rzv2h_cpg_priv *priv = pll_clk->priv;
	unsigned int clk1, clk2;
	u64 rate;

	if (!PLL_CLK_ACCESS(pll_clk->conf))
		return 0;

	clk1 = readl(priv->base + PLL_CLK1_OFFSET(pll_clk->conf));
	clk2 = readl(priv->base + PLL_CLK2_OFFSET(pll_clk->conf));
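
	/*
	 * This computes parent_rate * (MDIV + KDIV / 2^16) / (PDIV * 2^SDIV),
	 * with MDIV the integer and KDIV the signed fractional part of the
	 * multiplier, PDIV the input divider and SDIV the output scaler.
	 */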
	rate = mul_u64_u32_shr(parent_rate, (MDIV(clk1) << 16) + KDIV(clk1),
			       16 + SDIV(clk2));

	return DIV_ROUND_CLOSEST_ULL(rate, PDIV(clk1));
}

static const struct clk_ops rzv2h_cpg_pll_ops = {
	.recalc_rate = rzv2h_cpg_pll_clk_recalc_rate,
};

static struct clk * __init
rzv2h_cpg_pll_clk_register(const struct cpg_core_clk *core,
			   struct rzv2h_cpg_priv *priv,
			   const struct clk_ops *ops)
{
	void __iomem *base = priv->base;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	const struct clk *parent;
	const char *parent_name;
	struct pll_clk *pll_clk;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	pll_clk = devm_kzalloc(dev, sizeof(*pll_clk), GFP_KERNEL);
	if (!pll_clk)
		return ERR_PTR(-ENOMEM);

	parent_name = __clk_get_name(parent);
	init.name = core->name;
	init.ops = ops;
	init.flags = 0;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	pll_clk->hw.init = &init;
	pll_clk->conf = core->cfg.conf;
	pll_clk->base = base;
	pll_clk->priv = priv;
	pll_clk->type = core->type;

	ret = devm_clk_hw_register(dev, &pll_clk->hw);
	if (ret)
		return ERR_PTR(ret);

	return pll_clk->hw.clk;
}

static unsigned long rzv2h_ddiv_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	unsigned int val;

	val = readl(divider->reg) >> divider->shift;
	val &= clk_div_mask(divider->width);

	return divider_recalc_rate(hw, parent_rate, val, divider->table,
				   divider->flags, divider->width);
}

static long rzv2h_ddiv_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *prate)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_round_rate(hw, rate, prate, divider->table,
				  divider->width, divider->flags);
}

static int rzv2h_ddiv_determine_rate(struct clk_hw *hw,
				     struct clk_rate_request *req)
{
	struct clk_divider *divider = to_clk_divider(hw);

	return divider_determine_rate(hw, req, divider->table, divider->width,
				      divider->flags);
}
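
/*
 * Each DDIV has a status bit in CPG_CLKSTATUS0 that stays set while the
 * divider is still applying a newly written value; poll until it clears
 * so consecutive rate changes never overlap.
 */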
static inline int rzv2h_cpg_wait_ddiv_clk_update_done(void __iomem *base, u8 mon)
{
	u32 bitmask = BIT(mon);
	u32 val;

	return readl_poll_timeout_atomic(base + CPG_CLKSTATUS0, val, !(val & bitmask), 10, 200);
}

static int rzv2h_ddiv_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_divider *divider = to_clk_divider(hw);
	struct ddiv_clk *ddiv = to_ddiv_clock(divider);
	struct rzv2h_cpg_priv *priv = ddiv->priv;
	unsigned long flags = 0;
	int value;
	u32 val;
	int ret;

	value = divider_get_val(rate, parent_rate, divider->table,
				divider->width, divider->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(divider->lock, flags);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;
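
	/*
	 * Read-modify-write of the divider field: the matching WEN bit must
	 * be set in the same write for the new value to be accepted.
	 */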
	val = readl(divider->reg) | DDIV_DIVCTL_WEN(divider->shift);
	val &= ~(clk_div_mask(divider->width) << divider->shift);
	val |= (u32)value << divider->shift;
	writel(val, divider->reg);

	ret = rzv2h_cpg_wait_ddiv_clk_update_done(priv->base, ddiv->mon);
	if (ret)
		goto ddiv_timeout;

	spin_unlock_irqrestore(divider->lock, flags);

	return 0;

ddiv_timeout:
	spin_unlock_irqrestore(divider->lock, flags);
	return ret;
}

static const struct clk_ops rzv2h_ddiv_clk_divider_ops = {
	.recalc_rate = rzv2h_ddiv_recalc_rate,
	.round_rate = rzv2h_ddiv_round_rate,
	.determine_rate = rzv2h_ddiv_determine_rate,
	.set_rate = rzv2h_ddiv_set_rate,
};

static struct clk * __init
rzv2h_cpg_ddiv_clk_register(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct ddiv cfg_ddiv = core->cfg.ddiv;
	struct clk_init_data init = {};
	struct device *dev = priv->dev;
	u8 shift = cfg_ddiv.shift;
	u8 width = cfg_ddiv.width;
	const struct clk *parent;
	const char *parent_name;
	struct clk_divider *div;
	struct ddiv_clk *ddiv;
	int ret;

	parent = priv->clks[core->parent];
	if (IS_ERR(parent))
		return ERR_CAST(parent);

	parent_name = __clk_get_name(parent);

	if ((shift + width) > 16)
		return ERR_PTR(-EINVAL);

	ddiv = devm_kzalloc(priv->dev, sizeof(*ddiv), GFP_KERNEL);
	if (!ddiv)
		return ERR_PTR(-ENOMEM);

	init.name = core->name;
	init.ops = &rzv2h_ddiv_clk_divider_ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	ddiv->priv = priv;
	ddiv->mon = cfg_ddiv.monbit;
	div = &ddiv->div;
	div->reg = priv->base + cfg_ddiv.offset;
	div->shift = shift;
	div->width = width;
	div->flags = core->flag;
	div->lock = &priv->rmw_lock;
	div->hw.init = &init;
	div->table = core->dtable;

	ret = devm_clk_hw_register(dev, &div->hw);
	if (ret)
		return ERR_PTR(ret);

	return div->hw.clk;
}
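
/*
 * Two-cell clock lookup for DT consumers: the first specifier cell
 * selects CPG_CORE or CPG_MOD (from dt-bindings), the second the clock
 * index, e.g. "clocks = <&cpg CPG_MOD 4>;" (the index 4 is purely
 * illustrative) resolves to the module clock registered at index 4.
 */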
static struct clk
*rzv2h_cpg_clk_src_twocell_get(struct of_phandle_args *clkspec,
			       void *data)
{
	unsigned int clkidx = clkspec->args[1];
	struct rzv2h_cpg_priv *priv = data;
	struct device *dev = priv->dev;
	const char *type;
	struct clk *clk;

	switch (clkspec->args[0]) {
	case CPG_CORE:
		type = "core";
		if (clkidx > priv->last_dt_core_clk) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[clkidx];
		break;

	case CPG_MOD:
		type = "module";
		if (clkidx >= priv->num_mod_clks) {
			dev_err(dev, "Invalid %s clock index %u\n", type, clkidx);
			return ERR_PTR(-EINVAL);
		}
		clk = priv->clks[priv->num_core_clks + clkidx];
		break;

	default:
		dev_err(dev, "Invalid CPG clock type %u\n", clkspec->args[0]);
		return ERR_PTR(-EINVAL);
	}

	if (IS_ERR(clk))
		dev_err(dev, "Cannot get %s clock %u: %ld\n", type, clkidx,
			PTR_ERR(clk));
	else
		dev_dbg(dev, "clock (%u, %u) is %pC at %lu Hz\n",
			clkspec->args[0], clkspec->args[1], clk,
			clk_get_rate(clk));
	return clk;
}

static void __init
rzv2h_cpg_register_core_clk(const struct cpg_core_clk *core,
			    struct rzv2h_cpg_priv *priv)
{
	struct clk *clk = ERR_PTR(-EOPNOTSUPP), *parent;
	unsigned int id = core->id, div = core->div;
	struct device *dev = priv->dev;
	const char *parent_name;
	struct clk_hw *clk_hw;

	WARN_DEBUG(id >= priv->num_core_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	switch (core->type) {
	case CLK_TYPE_IN:
		clk = of_clk_get_by_name(priv->dev->of_node, core->name);
		break;
	case CLK_TYPE_FF:
		WARN_DEBUG(core->parent >= priv->num_core_clks);
		parent = priv->clks[core->parent];
		if (IS_ERR(parent)) {
			clk = parent;
			goto fail;
		}

		parent_name = __clk_get_name(parent);
		clk_hw = devm_clk_hw_register_fixed_factor(dev, core->name,
							   parent_name, CLK_SET_RATE_PARENT,
							   core->mult, div);
		if (IS_ERR(clk_hw))
			clk = ERR_CAST(clk_hw);
		else
			clk = clk_hw->clk;
		break;
	case CLK_TYPE_PLL:
		clk = rzv2h_cpg_pll_clk_register(core, priv, &rzv2h_cpg_pll_ops);
		break;
	case CLK_TYPE_DDIV:
		clk = rzv2h_cpg_ddiv_clk_register(core, priv);
		break;
	default:
		goto fail;
	}

	if (IS_ERR_OR_NULL(clk))
		goto fail;

	dev_dbg(dev, "Core clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;
	return;

fail:
	dev_err(dev, "Failed to register core clock %s: %ld\n",
		core->name, PTR_ERR(clk));
}

static int rzv2h_mod_clock_endisable(struct clk_hw *hw, bool enable)
{
	struct mod_clock *clock = to_mod_clock(hw);
	unsigned int reg = GET_CLK_ON_OFFSET(clock->on_index);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->on_bit);
	struct device *dev = priv->dev;
	u32 value;
	int error;

	dev_dbg(dev, "CLK_ON 0x%x/%pC %s\n", reg, hw->clk,
		enable ? "ON" : "OFF");
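
	/*
	 * The upper 16 bits of a CLK_ON register are a write-enable mask for
	 * the lower 16, so writing BIT(on_bit + 16), with or without
	 * BIT(on_bit), toggles just this clock and leaves the other gates in
	 * the register untouched.
	 */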
	value = bitmask << 16;
	if (enable)
		value |= bitmask;

	writel(value, priv->base + reg);

	if (!enable || clock->mon_index < 0)
		return 0;

	reg = GET_CLK_MON_OFFSET(clock->mon_index);
	bitmask = BIT(clock->mon_bit);
	error = readl_poll_timeout_atomic(priv->base + reg, value,
					  value & bitmask, 0, 10);
	if (error)
		dev_err(dev, "Failed to enable CLK_ON %p\n",
			priv->base + reg);

	return error;
}

static int rzv2h_mod_clock_enable(struct clk_hw *hw)
{
	return rzv2h_mod_clock_endisable(hw, true);
}

static void rzv2h_mod_clock_disable(struct clk_hw *hw)
{
	rzv2h_mod_clock_endisable(hw, false);
}
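
/*
 * When a monitor bit exists, report the state from CLK_MON, which
 * reflects the clock itself; the CLK_ON register only holds the last
 * requested gate setting.
 */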
static int rzv2h_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mod_clock *clock = to_mod_clock(hw);
	struct rzv2h_cpg_priv *priv = clock->priv;
	u32 bitmask;
	u32 offset;

	if (clock->mon_index >= 0) {
		offset = GET_CLK_MON_OFFSET(clock->mon_index);
		bitmask = BIT(clock->mon_bit);
	} else {
		offset = GET_CLK_ON_OFFSET(clock->on_index);
		bitmask = BIT(clock->on_bit);
	}

	return readl(priv->base + offset) & bitmask;
}

static const struct clk_ops rzv2h_mod_clock_ops = {
	.enable = rzv2h_mod_clock_enable,
	.disable = rzv2h_mod_clock_disable,
	.is_enabled = rzv2h_mod_clock_is_enabled,
};

static void __init
rzv2h_cpg_register_mod_clk(const struct rzv2h_mod_clk *mod,
			   struct rzv2h_cpg_priv *priv)
{
	struct mod_clock *clock = NULL;
	struct device *dev = priv->dev;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int id;
	int ret;

	id = GET_MOD_CLK_ID(priv->num_core_clks, mod->on_index, mod->on_bit);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzv2h_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	if (mod->critical)
		init.flags |= CLK_IS_CRITICAL;

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->on_index = mod->on_index;
	clock->on_bit = mod->on_bit;
	clock->mon_index = mod->mon_index;
	clock->mon_bit = mod->mon_bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	priv->clks[id] = clock->hw.clk;

	return;

fail:
	dev_err(dev, "Failed to register module clock %s: %ld\n",
		mod->name, PTR_ERR(clk));
}
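
/*
 * The RST registers use the same write-enable layout as CLK_ON: bits
 * 16-31 gate writes to bits 0-15. Writing only the WEN bit (reset line
 * low) asserts the reset, writing WEN plus the reset bit deasserts it,
 * and the corresponding RST_MON bit is set while the reset is asserted.
 */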
static int rzv2h_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}

static int rzv2h_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_OFFSET(priv->resets[id].reset_index);
	u32 mask = BIT(priv->resets[id].reset_bit);
	u8 monbit = priv->resets[id].mon_bit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id, reg);

	writel(value, priv->base + reg);

	reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	mask = BIT(monbit);

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}

static int rzv2h_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret;

	ret = rzv2h_cpg_assert(rcdev, id);
	if (ret)
		return ret;

	return rzv2h_cpg_deassert(rcdev, id);
}

static int rzv2h_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int reg = GET_RST_MON_OFFSET(priv->resets[id].mon_index);
	u8 monbit = priv->resets[id].mon_bit;

	return !!(readl(priv->base + reg) & BIT(monbit));
}

static const struct reset_control_ops rzv2h_cpg_reset_ops = {
	.reset = rzv2h_cpg_reset,
	.assert = rzv2h_cpg_assert,
	.deassert = rzv2h_cpg_deassert,
	.status = rzv2h_cpg_status,
};
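
/*
 * A DT reset specifier encodes (register index * 16 + bit); translate
 * it back to the position of the matching entry in the SoC's resets[]
 * table, which is the ID the reset framework passes to the ops above.
 */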
static int rzv2h_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzv2h_cpg_priv *priv = rcdev_to_priv(rcdev);
	unsigned int id = reset_spec->args[0];
	u8 rst_index = id / 16;
	u8 rst_bit = id % 16;
	unsigned int i;

	for (i = 0; i < rcdev->nr_resets; i++) {
		if (rst_index == priv->resets[i].reset_index &&
		    rst_bit == priv->resets[i].reset_bit)
			return i;
	}

	return -EINVAL;
}

static int rzv2h_cpg_reset_controller_register(struct rzv2h_cpg_priv *priv)
{
	priv->rcdev.ops = &rzv2h_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzv2h_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}

/**
 * struct rzv2h_cpg_pd - RZ/V2H power domain data structure
 * @priv: pointer to CPG private data structure
 * @genpd: generic PM domain
 */
struct rzv2h_cpg_pd {
	struct rzv2h_cpg_priv *priv;
	struct generic_pm_domain genpd;
};
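
/*
 * genpd attach hook: walk the consumer's "clocks" property and hand
 * every referenced clock to pm_clk, so runtime PM can gate the module
 * clocks while the device is suspended.
 */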
static int rzv2h_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (once) {
			once = false;
			error = pm_clk_create(dev);
			if (error) {
				of_node_put(clkspec.np);
				goto err;
			}
		}

		clk = of_clk_get_from_provider(&clkspec);
		of_node_put(clkspec.np);
		if (IS_ERR(clk)) {
			error = PTR_ERR(clk);
			goto fail_destroy;
		}

		error = pm_clk_add_clk(dev, clk);
		if (error) {
			dev_err(dev, "pm_clk_add_clk failed %d\n",
				error);
			goto fail_put;
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}

static void rzv2h_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (!pm_clk_no_clocks(dev))
		pm_clk_destroy(dev);
}

static void rzv2h_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}

static int __init rzv2h_cpg_add_pm_domains(struct rzv2h_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzv2h_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->priv = priv;
	pd->genpd.flags |= GENPD_FLAG_ALWAYS_ON | GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzv2h_cpg_attach_dev;
	pd->genpd.detach_dev = rzv2h_cpg_detach_dev;
	ret = pm_genpd_init(&pd->genpd, &pm_domain_always_on_gov, false);
	if (ret)
		return ret;

	ret = devm_add_action_or_reset(dev, rzv2h_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}

static void rzv2h_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
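
/*
 * Probe order matters: clks[] is seeded with -ENOENT so the WARN_DEBUG
 * checks can catch duplicate registrations, core clocks are registered
 * before module clocks (which look up their parents in clks[]), and the
 * OF clock provider, PM domain and reset controller are only exposed
 * once all clocks exist.
 */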
static int __init rzv2h_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzv2h_cpg_info *info;
	struct rzv2h_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->rmw_lock);

	priv->dev = dev;

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	priv->resets = devm_kmemdup(dev, info->resets, sizeof(*info->resets) *
				    info->num_resets, GFP_KERNEL);
	if (!priv->resets)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->last_dt_core_clk = info->last_dt_core_clk;
	priv->num_resets = info->num_resets;

	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzv2h_cpg_register_core_clk(&info->core_clks[i], priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzv2h_cpg_register_mod_clk(&info->mod_clks[i], priv);

	error = of_clk_add_provider(np, rzv2h_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzv2h_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzv2h_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzv2h_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}

static const struct of_device_id rzv2h_cpg_match[] = {
#ifdef CONFIG_CLK_R9A09G057
	{
		.compatible = "renesas,r9a09g057-cpg",
		.data = &r9a09g057_cpg_info,
	},
#endif
	{ /* sentinel */ }
};

static struct platform_driver rzv2h_cpg_driver = {
	.driver		= {
		.name	= "rzv2h-cpg",
		.of_match_table = rzv2h_cpg_match,
	},
};

static int __init rzv2h_cpg_init(void)
{
	return platform_driver_probe(&rzv2h_cpg_driver, rzv2h_cpg_probe);
}

subsys_initcall(rzv2h_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/V2H CPG Driver");
MODULE_LICENSE("GPL");