// SPDX-License-Identifier: GPL-2.0
/*
 * RZ/G2L Clock Pulse Generator
 *
 * Copyright (C) 2021 Renesas Electronics Corp.
 *
 * Based on renesas-cpg-mssr.c
 *
 * Copyright (C) 2015 Glider bvba
 * Copyright (C) 2013 Ideas On Board SPRL
 * Copyright (C) 2015 Renesas Electronics Corp.
 */
14 #include <linux/bitfield.h>
15 #include <linux/clk.h>
16 #include <linux/clk-provider.h>
17 #include <linux/clk/renesas.h>
18 #include <linux/delay.h>
19 #include <linux/device.h>
20 #include <linux/init.h>
21 #include <linux/iopoll.h>
22 #include <linux/mod_devicetable.h>
23 #include <linux/module.h>
25 #include <linux/platform_device.h>
26 #include <linux/pm_clock.h>
27 #include <linux/pm_domain.h>
28 #include <linux/reset-controller.h>
29 #include <linux/slab.h>
30 #include <linux/units.h>
32 #include <dt-bindings/clock/renesas-cpg-mssr.h>
34 #include "rzg2l-cpg.h"
/* Warn on out-of-range clock table indices only in debug builds. */
#ifdef DEBUG
#define WARN_DEBUG(x)	WARN_ON(x)
#else
#define WARN_DEBUG(x)	do { } while (0)
#endif
/* Shift/width fields packed into a clock's conf word (see GET_REG_OFFSET). */
#define GET_SHIFT(val)		((val >> 12) & 0xff)
#define GET_WIDTH(val)		((val >> 8) & 0xf)

/* SAM PLL register CLK1/CLK2 divider fields. */
#define KDIV(val)		((s16)FIELD_GET(GENMASK(31, 16), val))
#define MDIV(val)		FIELD_GET(GENMASK(15, 6), val)
#define PDIV(val)		FIELD_GET(GENMASK(5, 0), val)
#define SDIV(val)		FIELD_GET(GENMASK(2, 0), val)

/* RZ/G3S PLL register divider fields. */
#define RZG3S_DIV_P		GENMASK(28, 26)
#define RZG3S_DIV_M		GENMASK(25, 22)
#define RZG3S_DIV_NI		GENMASK(21, 13)
#define RZG3S_DIV_NF		GENMASK(12, 1)

/* Module clock ON/MON and reset/monitor register offsets. */
#define CLK_ON_R(reg)		(reg)
#define CLK_MON_R(reg)		(0x180 + (reg))
#define CLK_RST_R(reg)		(reg)
#define CLK_MRST_R(reg)		(0x180 + (reg))

#define GET_REG_OFFSET(val)		((val >> 20) & 0xfff)
#define GET_REG_SAMPLL_CLK1(val)	((val >> 22) & 0xfff)
#define GET_REG_SAMPLL_CLK2(val)	((val >> 12) & 0xfff)

#define CPG_WEN_BIT		BIT(16)

#define MAX_VCLK_FREQ		(148500000)
69 * struct clk_hw_data - clock hardware data
71 * @conf: clock configuration (register offset, shift, width)
72 * @sconf: clock status configuration (register offset, shift, width)
73 * @priv: CPG private data structure
79 struct rzg2l_cpg_priv
*priv
;
82 #define to_clk_hw_data(_hw) container_of(_hw, struct clk_hw_data, hw)
85 * struct sd_mux_hw_data - SD MUX clock hardware data
86 * @hw_data: clock hw data
87 * @mtable: clock mux table
89 struct sd_mux_hw_data
{
90 struct clk_hw_data hw_data
;
94 #define to_sd_mux_hw_data(_hw) container_of(_hw, struct sd_mux_hw_data, hw_data)
97 * struct div_hw_data - divider clock hardware data
98 * @hw_data: clock hw data
99 * @dtable: pointer to divider table
100 * @invalid_rate: invalid rate for divider
101 * @max_rate: maximum rate for divider
102 * @width: divider width
105 struct clk_hw_data hw_data
;
106 const struct clk_div_table
*dtable
;
107 unsigned long invalid_rate
;
108 unsigned long max_rate
;
112 #define to_div_hw_data(_hw) container_of(_hw, struct div_hw_data, hw_data)
114 struct rzg2l_pll5_param
{
123 struct rzg2l_pll5_mux_dsi_div_param
{
130 * struct rzg2l_cpg_priv - Clock Pulse Generator Private Data
132 * @rcdev: Reset controller entity
134 * @base: CPG register block base address
135 * @rmw_lock: protects register accesses
136 * @clks: Array containing all Core and Module Clocks
137 * @num_core_clks: Number of Core Clocks in clks[]
138 * @num_mod_clks: Number of Module Clocks in clks[]
139 * @num_resets: Number of Module Resets in info->resets[]
140 * @last_dt_core_clk: ID of the last Core Clock exported to DT
141 * @info: Pointer to platform data
142 * @mux_dsi_div_params: pll5 mux and dsi div parameters
144 struct rzg2l_cpg_priv
{
145 struct reset_controller_dev rcdev
;
151 unsigned int num_core_clks
;
152 unsigned int num_mod_clks
;
153 unsigned int num_resets
;
154 unsigned int last_dt_core_clk
;
156 const struct rzg2l_cpg_info
*info
;
158 struct rzg2l_pll5_mux_dsi_div_param mux_dsi_div_params
;
/* devm action: unregister the OF clock provider on driver teardown. */
static void rzg2l_cpg_del_clk_provider(void *data)
{
	of_clk_del_provider(data);
}
166 /* Must be called in atomic context. */
167 static int rzg2l_cpg_wait_clk_update_done(void __iomem
*base
, u32 conf
)
169 u32 bitmask
= GENMASK(GET_WIDTH(conf
) - 1, 0) << GET_SHIFT(conf
);
170 u32 off
= GET_REG_OFFSET(conf
);
173 return readl_poll_timeout_atomic(base
+ off
, val
, !(val
& bitmask
), 10, 200);
176 int rzg2l_cpg_sd_clk_mux_notifier(struct notifier_block
*nb
, unsigned long event
,
179 struct clk_notifier_data
*cnd
= data
;
180 struct clk_hw
*hw
= __clk_get_hw(cnd
->clk
);
181 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
182 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
183 u32 off
= GET_REG_OFFSET(clk_hw_data
->conf
);
184 u32 shift
= GET_SHIFT(clk_hw_data
->conf
);
185 const u32 clk_src_266
= 3;
189 if (event
!= PRE_RATE_CHANGE
|| (cnd
->new_rate
/ MEGA
== 266))
192 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
195 * As per the HW manual, we should not directly switch from 533 MHz to
196 * 400 MHz and vice versa. To change the setting from 2’b01 (533 MHz)
197 * to 2’b10 (400 MHz) or vice versa, Switch to 2’b11 (266 MHz) first,
198 * and then switch to the target setting (2’b01 (533 MHz) or 2’b10
200 * Setting a value of '0' to the SEL_SDHI0_SET or SEL_SDHI1_SET clock
201 * switching register is prohibited.
202 * The clock mux has 3 input clocks(533 MHz, 400 MHz, and 266 MHz), and
203 * the index to value mapping is done by adding 1 to the index.
206 writel((CPG_WEN_BIT
| clk_src_266
) << shift
, priv
->base
+ off
);
208 /* Wait for the update done. */
209 ret
= rzg2l_cpg_wait_clk_update_done(priv
->base
, clk_hw_data
->sconf
);
211 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
214 dev_err(priv
->dev
, "failed to switch to safe clk source\n");
216 return notifier_from_errno(ret
);
219 int rzg3s_cpg_div_clk_notifier(struct notifier_block
*nb
, unsigned long event
,
222 struct clk_notifier_data
*cnd
= data
;
223 struct clk_hw
*hw
= __clk_get_hw(cnd
->clk
);
224 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
225 struct div_hw_data
*div_hw_data
= to_div_hw_data(clk_hw_data
);
226 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
227 u32 off
= GET_REG_OFFSET(clk_hw_data
->conf
);
228 u32 shift
= GET_SHIFT(clk_hw_data
->conf
);
233 if (event
!= PRE_RATE_CHANGE
|| !div_hw_data
->invalid_rate
||
234 div_hw_data
->invalid_rate
% cnd
->new_rate
)
237 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
239 val
= readl(priv
->base
+ off
);
241 val
&= GENMASK(GET_WIDTH(clk_hw_data
->conf
) - 1, 0);
244 * There are different constraints for the user of this notifiers as follows:
245 * 1/ SD div cannot be 1 (val == 0) if parent rate is 800MHz
246 * 2/ OCTA / SPI div cannot be 1 (val == 0) if parent rate is 400MHz
247 * As SD can have only one parent having 800MHz and OCTA div can have
248 * only one parent having 400MHz we took into account the parent rate
249 * at the beginning of function (by checking invalid_rate % new_rate).
250 * Now it is time to check the hardware divider and update it accordingly.
253 writel((CPG_WEN_BIT
| 1) << shift
, priv
->base
+ off
);
254 /* Wait for the update done. */
255 ret
= rzg2l_cpg_wait_clk_update_done(priv
->base
, clk_hw_data
->sconf
);
258 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
261 dev_err(priv
->dev
, "Failed to downgrade the div\n");
263 return notifier_from_errno(ret
);
266 static int rzg2l_register_notifier(struct clk_hw
*hw
, const struct cpg_core_clk
*core
,
267 struct rzg2l_cpg_priv
*priv
)
269 struct notifier_block
*nb
;
274 nb
= devm_kzalloc(priv
->dev
, sizeof(*nb
), GFP_KERNEL
);
278 nb
->notifier_call
= core
->notifier
;
280 return clk_notifier_register(hw
->clk
, nb
);
283 static unsigned long rzg3s_div_clk_recalc_rate(struct clk_hw
*hw
,
284 unsigned long parent_rate
)
286 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
287 struct div_hw_data
*div_hw_data
= to_div_hw_data(clk_hw_data
);
288 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
291 val
= readl(priv
->base
+ GET_REG_OFFSET(clk_hw_data
->conf
));
292 val
>>= GET_SHIFT(clk_hw_data
->conf
);
293 val
&= GENMASK(GET_WIDTH(clk_hw_data
->conf
) - 1, 0);
295 return divider_recalc_rate(hw
, parent_rate
, val
, div_hw_data
->dtable
,
296 CLK_DIVIDER_ROUND_CLOSEST
, div_hw_data
->width
);
299 static int rzg3s_div_clk_determine_rate(struct clk_hw
*hw
, struct clk_rate_request
*req
)
301 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
302 struct div_hw_data
*div_hw_data
= to_div_hw_data(clk_hw_data
);
304 if (div_hw_data
->max_rate
&& req
->rate
> div_hw_data
->max_rate
)
305 req
->rate
= div_hw_data
->max_rate
;
307 return divider_determine_rate(hw
, req
, div_hw_data
->dtable
, div_hw_data
->width
,
308 CLK_DIVIDER_ROUND_CLOSEST
);
311 static int rzg3s_div_clk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
312 unsigned long parent_rate
)
314 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
315 struct div_hw_data
*div_hw_data
= to_div_hw_data(clk_hw_data
);
316 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
317 u32 off
= GET_REG_OFFSET(clk_hw_data
->conf
);
318 u32 shift
= GET_SHIFT(clk_hw_data
->conf
);
323 val
= divider_get_val(rate
, parent_rate
, div_hw_data
->dtable
, div_hw_data
->width
,
324 CLK_DIVIDER_ROUND_CLOSEST
);
326 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
327 writel((CPG_WEN_BIT
| val
) << shift
, priv
->base
+ off
);
328 /* Wait for the update done. */
329 ret
= rzg2l_cpg_wait_clk_update_done(priv
->base
, clk_hw_data
->sconf
);
330 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
335 static const struct clk_ops rzg3s_div_clk_ops
= {
336 .recalc_rate
= rzg3s_div_clk_recalc_rate
,
337 .determine_rate
= rzg3s_div_clk_determine_rate
,
338 .set_rate
= rzg3s_div_clk_set_rate
,
341 static struct clk
* __init
342 rzg3s_cpg_div_clk_register(const struct cpg_core_clk
*core
, struct rzg2l_cpg_priv
*priv
)
344 struct div_hw_data
*div_hw_data
;
345 struct clk_init_data init
= {};
346 const struct clk_div_table
*clkt
;
347 struct clk_hw
*clk_hw
;
348 const struct clk
*parent
;
349 const char *parent_name
;
353 parent
= priv
->clks
[core
->parent
];
355 return ERR_CAST(parent
);
357 parent_name
= __clk_get_name(parent
);
359 div_hw_data
= devm_kzalloc(priv
->dev
, sizeof(*div_hw_data
), GFP_KERNEL
);
361 return ERR_PTR(-ENOMEM
);
363 init
.name
= core
->name
;
364 init
.flags
= core
->flag
;
365 init
.ops
= &rzg3s_div_clk_ops
;
366 init
.parent_names
= &parent_name
;
367 init
.num_parents
= 1;
369 /* Get the maximum divider to retrieve div width. */
370 for (clkt
= core
->dtable
; clkt
->div
; clkt
++) {
375 div_hw_data
->hw_data
.priv
= priv
;
376 div_hw_data
->hw_data
.conf
= core
->conf
;
377 div_hw_data
->hw_data
.sconf
= core
->sconf
;
378 div_hw_data
->dtable
= core
->dtable
;
379 div_hw_data
->invalid_rate
= core
->invalid_rate
;
380 div_hw_data
->max_rate
= core
->max_rate
;
381 div_hw_data
->width
= fls(max
) - 1;
383 clk_hw
= &div_hw_data
->hw_data
.hw
;
384 clk_hw
->init
= &init
;
386 ret
= devm_clk_hw_register(priv
->dev
, clk_hw
);
390 ret
= rzg2l_register_notifier(clk_hw
, core
, priv
);
392 dev_err(priv
->dev
, "Failed to register notifier for %s\n",
400 static struct clk
* __init
401 rzg2l_cpg_div_clk_register(const struct cpg_core_clk
*core
,
402 struct rzg2l_cpg_priv
*priv
)
404 void __iomem
*base
= priv
->base
;
405 struct device
*dev
= priv
->dev
;
406 const struct clk
*parent
;
407 const char *parent_name
;
408 struct clk_hw
*clk_hw
;
410 parent
= priv
->clks
[core
->parent
];
412 return ERR_CAST(parent
);
414 parent_name
= __clk_get_name(parent
);
417 clk_hw
= clk_hw_register_divider_table(dev
, core
->name
,
419 base
+ GET_REG_OFFSET(core
->conf
),
420 GET_SHIFT(core
->conf
),
421 GET_WIDTH(core
->conf
),
426 clk_hw
= clk_hw_register_divider(dev
, core
->name
,
428 base
+ GET_REG_OFFSET(core
->conf
),
429 GET_SHIFT(core
->conf
),
430 GET_WIDTH(core
->conf
),
431 core
->flag
, &priv
->rmw_lock
);
434 return ERR_CAST(clk_hw
);
439 static struct clk
* __init
440 rzg2l_cpg_mux_clk_register(const struct cpg_core_clk
*core
,
441 struct rzg2l_cpg_priv
*priv
)
443 const struct clk_hw
*clk_hw
;
445 clk_hw
= devm_clk_hw_register_mux(priv
->dev
, core
->name
,
446 core
->parent_names
, core
->num_parents
,
448 priv
->base
+ GET_REG_OFFSET(core
->conf
),
449 GET_SHIFT(core
->conf
),
450 GET_WIDTH(core
->conf
),
451 core
->mux_flags
, &priv
->rmw_lock
);
453 return ERR_CAST(clk_hw
);
458 static int rzg2l_cpg_sd_clk_mux_set_parent(struct clk_hw
*hw
, u8 index
)
460 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
461 struct sd_mux_hw_data
*sd_mux_hw_data
= to_sd_mux_hw_data(clk_hw_data
);
462 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
463 u32 off
= GET_REG_OFFSET(clk_hw_data
->conf
);
464 u32 shift
= GET_SHIFT(clk_hw_data
->conf
);
469 val
= clk_mux_index_to_val(sd_mux_hw_data
->mtable
, CLK_MUX_ROUND_CLOSEST
, index
);
471 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
473 writel((CPG_WEN_BIT
| val
) << shift
, priv
->base
+ off
);
475 /* Wait for the update done. */
476 ret
= rzg2l_cpg_wait_clk_update_done(priv
->base
, clk_hw_data
->sconf
);
478 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
481 dev_err(priv
->dev
, "Failed to switch parent\n");
486 static u8
rzg2l_cpg_sd_clk_mux_get_parent(struct clk_hw
*hw
)
488 struct clk_hw_data
*clk_hw_data
= to_clk_hw_data(hw
);
489 struct sd_mux_hw_data
*sd_mux_hw_data
= to_sd_mux_hw_data(clk_hw_data
);
490 struct rzg2l_cpg_priv
*priv
= clk_hw_data
->priv
;
493 val
= readl(priv
->base
+ GET_REG_OFFSET(clk_hw_data
->conf
));
494 val
>>= GET_SHIFT(clk_hw_data
->conf
);
495 val
&= GENMASK(GET_WIDTH(clk_hw_data
->conf
) - 1, 0);
497 return clk_mux_val_to_index(hw
, sd_mux_hw_data
->mtable
, CLK_MUX_ROUND_CLOSEST
, val
);
500 static const struct clk_ops rzg2l_cpg_sd_clk_mux_ops
= {
501 .determine_rate
= __clk_mux_determine_rate_closest
,
502 .set_parent
= rzg2l_cpg_sd_clk_mux_set_parent
,
503 .get_parent
= rzg2l_cpg_sd_clk_mux_get_parent
,
506 static struct clk
* __init
507 rzg2l_cpg_sd_mux_clk_register(const struct cpg_core_clk
*core
,
508 struct rzg2l_cpg_priv
*priv
)
510 struct sd_mux_hw_data
*sd_mux_hw_data
;
511 struct clk_init_data init
;
512 struct clk_hw
*clk_hw
;
515 sd_mux_hw_data
= devm_kzalloc(priv
->dev
, sizeof(*sd_mux_hw_data
), GFP_KERNEL
);
517 return ERR_PTR(-ENOMEM
);
519 sd_mux_hw_data
->hw_data
.priv
= priv
;
520 sd_mux_hw_data
->hw_data
.conf
= core
->conf
;
521 sd_mux_hw_data
->hw_data
.sconf
= core
->sconf
;
522 sd_mux_hw_data
->mtable
= core
->mtable
;
524 init
.name
= core
->name
;
525 init
.ops
= &rzg2l_cpg_sd_clk_mux_ops
;
526 init
.flags
= core
->flag
;
527 init
.num_parents
= core
->num_parents
;
528 init
.parent_names
= core
->parent_names
;
530 clk_hw
= &sd_mux_hw_data
->hw_data
.hw
;
531 clk_hw
->init
= &init
;
533 ret
= devm_clk_hw_register(priv
->dev
, clk_hw
);
537 ret
= rzg2l_register_notifier(clk_hw
, core
, priv
);
539 dev_err(priv
->dev
, "Failed to register notifier for %s\n",
548 rzg2l_cpg_get_foutpostdiv_rate(struct rzg2l_pll5_param
*params
,
551 unsigned long foutpostdiv_rate
, foutvco_rate
;
553 params
->pl5_intin
= rate
/ MEGA
;
554 params
->pl5_fracin
= div_u64(((u64
)rate
% MEGA
) << 24, MEGA
);
555 params
->pl5_refdiv
= 2;
556 params
->pl5_postdiv1
= 1;
557 params
->pl5_postdiv2
= 1;
558 params
->pl5_spread
= 0x16;
560 foutvco_rate
= div_u64(mul_u32_u32(EXTAL_FREQ_IN_MEGA_HZ
* MEGA
,
561 (params
->pl5_intin
<< 24) + params
->pl5_fracin
),
562 params
->pl5_refdiv
) >> 24;
563 foutpostdiv_rate
= DIV_ROUND_CLOSEST_ULL(foutvco_rate
,
564 params
->pl5_postdiv1
* params
->pl5_postdiv2
);
566 return foutpostdiv_rate
;
569 struct dsi_div_hw_data
{
573 struct rzg2l_cpg_priv
*priv
;
576 #define to_dsi_div_hw_data(_hw) container_of(_hw, struct dsi_div_hw_data, hw)
578 static unsigned long rzg2l_cpg_dsi_div_recalc_rate(struct clk_hw
*hw
,
579 unsigned long parent_rate
)
581 struct dsi_div_hw_data
*dsi_div
= to_dsi_div_hw_data(hw
);
582 unsigned long rate
= dsi_div
->rate
;
590 static unsigned long rzg2l_cpg_get_vclk_parent_rate(struct clk_hw
*hw
,
593 struct dsi_div_hw_data
*dsi_div
= to_dsi_div_hw_data(hw
);
594 struct rzg2l_cpg_priv
*priv
= dsi_div
->priv
;
595 struct rzg2l_pll5_param params
;
596 unsigned long parent_rate
;
598 parent_rate
= rzg2l_cpg_get_foutpostdiv_rate(¶ms
, rate
);
600 if (priv
->mux_dsi_div_params
.clksrc
)
606 static int rzg2l_cpg_dsi_div_determine_rate(struct clk_hw
*hw
,
607 struct clk_rate_request
*req
)
609 if (req
->rate
> MAX_VCLK_FREQ
)
610 req
->rate
= MAX_VCLK_FREQ
;
612 req
->best_parent_rate
= rzg2l_cpg_get_vclk_parent_rate(hw
, req
->rate
);
617 static int rzg2l_cpg_dsi_div_set_rate(struct clk_hw
*hw
,
619 unsigned long parent_rate
)
621 struct dsi_div_hw_data
*dsi_div
= to_dsi_div_hw_data(hw
);
622 struct rzg2l_cpg_priv
*priv
= dsi_div
->priv
;
625 * MUX -->DIV_DSI_{A,B} -->M3 -->VCLK
627 * Based on the dot clock, the DSI divider clock sets the divider value,
628 * calculates the pll parameters for generating FOUTPOSTDIV and the clk
629 * source for the MUX and propagates that info to the parents.
632 if (!rate
|| rate
> MAX_VCLK_FREQ
)
635 dsi_div
->rate
= rate
;
636 writel(CPG_PL5_SDIV_DIV_DSI_A_WEN
| CPG_PL5_SDIV_DIV_DSI_B_WEN
|
637 (priv
->mux_dsi_div_params
.dsi_div_a
<< 0) |
638 (priv
->mux_dsi_div_params
.dsi_div_b
<< 8),
639 priv
->base
+ CPG_PL5_SDIV
);
644 static const struct clk_ops rzg2l_cpg_dsi_div_ops
= {
645 .recalc_rate
= rzg2l_cpg_dsi_div_recalc_rate
,
646 .determine_rate
= rzg2l_cpg_dsi_div_determine_rate
,
647 .set_rate
= rzg2l_cpg_dsi_div_set_rate
,
650 static struct clk
* __init
651 rzg2l_cpg_dsi_div_clk_register(const struct cpg_core_clk
*core
,
652 struct rzg2l_cpg_priv
*priv
)
654 struct dsi_div_hw_data
*clk_hw_data
;
655 const struct clk
*parent
;
656 const char *parent_name
;
657 struct clk_init_data init
;
658 struct clk_hw
*clk_hw
;
661 parent
= priv
->clks
[core
->parent
];
663 return ERR_CAST(parent
);
665 clk_hw_data
= devm_kzalloc(priv
->dev
, sizeof(*clk_hw_data
), GFP_KERNEL
);
667 return ERR_PTR(-ENOMEM
);
669 clk_hw_data
->priv
= priv
;
671 parent_name
= __clk_get_name(parent
);
672 init
.name
= core
->name
;
673 init
.ops
= &rzg2l_cpg_dsi_div_ops
;
674 init
.flags
= CLK_SET_RATE_PARENT
;
675 init
.parent_names
= &parent_name
;
676 init
.num_parents
= 1;
678 clk_hw
= &clk_hw_data
->hw
;
679 clk_hw
->init
= &init
;
681 ret
= devm_clk_hw_register(priv
->dev
, clk_hw
);
688 struct pll5_mux_hw_data
{
692 struct rzg2l_cpg_priv
*priv
;
695 #define to_pll5_mux_hw_data(_hw) container_of(_hw, struct pll5_mux_hw_data, hw)
697 static int rzg2l_cpg_pll5_4_clk_mux_determine_rate(struct clk_hw
*hw
,
698 struct clk_rate_request
*req
)
700 struct clk_hw
*parent
;
701 struct pll5_mux_hw_data
*hwdata
= to_pll5_mux_hw_data(hw
);
702 struct rzg2l_cpg_priv
*priv
= hwdata
->priv
;
704 parent
= clk_hw_get_parent_by_index(hw
, priv
->mux_dsi_div_params
.clksrc
);
705 req
->best_parent_hw
= parent
;
706 req
->best_parent_rate
= req
->rate
;
711 static int rzg2l_cpg_pll5_4_clk_mux_set_parent(struct clk_hw
*hw
, u8 index
)
713 struct pll5_mux_hw_data
*hwdata
= to_pll5_mux_hw_data(hw
);
714 struct rzg2l_cpg_priv
*priv
= hwdata
->priv
;
718 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
721 * Based on the dot clock, the DSI divider clock calculates the parent
722 * rate and clk source for the MUX. It propagates that info to
723 * pll5_4_clk_mux which sets the clock source for DSI divider clock.
726 writel(CPG_OTHERFUNC1_REG_RES0_ON_WEN
| index
,
727 priv
->base
+ CPG_OTHERFUNC1_REG
);
732 static u8
rzg2l_cpg_pll5_4_clk_mux_get_parent(struct clk_hw
*hw
)
734 struct pll5_mux_hw_data
*hwdata
= to_pll5_mux_hw_data(hw
);
735 struct rzg2l_cpg_priv
*priv
= hwdata
->priv
;
737 return readl(priv
->base
+ GET_REG_OFFSET(hwdata
->conf
));
740 static const struct clk_ops rzg2l_cpg_pll5_4_clk_mux_ops
= {
741 .determine_rate
= rzg2l_cpg_pll5_4_clk_mux_determine_rate
,
742 .set_parent
= rzg2l_cpg_pll5_4_clk_mux_set_parent
,
743 .get_parent
= rzg2l_cpg_pll5_4_clk_mux_get_parent
,
746 static struct clk
* __init
747 rzg2l_cpg_pll5_4_mux_clk_register(const struct cpg_core_clk
*core
,
748 struct rzg2l_cpg_priv
*priv
)
750 struct pll5_mux_hw_data
*clk_hw_data
;
751 struct clk_init_data init
;
752 struct clk_hw
*clk_hw
;
755 clk_hw_data
= devm_kzalloc(priv
->dev
, sizeof(*clk_hw_data
), GFP_KERNEL
);
757 return ERR_PTR(-ENOMEM
);
759 clk_hw_data
->priv
= priv
;
760 clk_hw_data
->conf
= core
->conf
;
762 init
.name
= core
->name
;
763 init
.ops
= &rzg2l_cpg_pll5_4_clk_mux_ops
;
764 init
.flags
= CLK_SET_RATE_PARENT
;
765 init
.num_parents
= core
->num_parents
;
766 init
.parent_names
= core
->parent_names
;
768 clk_hw
= &clk_hw_data
->hw
;
769 clk_hw
->init
= &init
;
771 ret
= devm_clk_hw_register(priv
->dev
, clk_hw
);
781 unsigned long foutpostdiv_rate
;
782 struct rzg2l_cpg_priv
*priv
;
785 #define to_sipll5(_hw) container_of(_hw, struct sipll5, hw)
787 static unsigned long rzg2l_cpg_get_vclk_rate(struct clk_hw
*hw
,
790 struct sipll5
*sipll5
= to_sipll5(hw
);
791 struct rzg2l_cpg_priv
*priv
= sipll5
->priv
;
794 vclk
= rate
/ ((1 << priv
->mux_dsi_div_params
.dsi_div_a
) *
795 (priv
->mux_dsi_div_params
.dsi_div_b
+ 1));
797 if (priv
->mux_dsi_div_params
.clksrc
)
803 static unsigned long rzg2l_cpg_sipll5_recalc_rate(struct clk_hw
*hw
,
804 unsigned long parent_rate
)
806 struct sipll5
*sipll5
= to_sipll5(hw
);
807 unsigned long pll5_rate
= sipll5
->foutpostdiv_rate
;
810 pll5_rate
= parent_rate
;
/* The PLL can synthesize any requested rate; accept it unchanged. */
static long rzg2l_cpg_sipll5_round_rate(struct clk_hw *hw,
					unsigned long rate,
					unsigned long *parent_rate)
{
	return rate;
}
822 static int rzg2l_cpg_sipll5_set_rate(struct clk_hw
*hw
,
824 unsigned long parent_rate
)
826 struct sipll5
*sipll5
= to_sipll5(hw
);
827 struct rzg2l_cpg_priv
*priv
= sipll5
->priv
;
828 struct rzg2l_pll5_param params
;
829 unsigned long vclk_rate
;
834 * OSC --> PLL5 --> FOUTPOSTDIV-->|
835 * | | -->MUX -->DIV_DSIA_B -->M3 -->VCLK
838 * Based on the dot clock, the DSI divider clock calculates the parent
839 * rate and the pll5 parameters for generating FOUTPOSTDIV. It propagates
840 * that info to sipll5 which sets parameters for generating FOUTPOSTDIV.
842 * OSC --> PLL5 --> FOUTPOSTDIV
848 vclk_rate
= rzg2l_cpg_get_vclk_rate(hw
, rate
);
849 sipll5
->foutpostdiv_rate
=
850 rzg2l_cpg_get_foutpostdiv_rate(¶ms
, vclk_rate
);
852 /* Put PLL5 into standby mode */
853 writel(CPG_SIPLL5_STBY_RESETB_WEN
, priv
->base
+ CPG_SIPLL5_STBY
);
854 ret
= readl_poll_timeout(priv
->base
+ CPG_SIPLL5_MON
, val
,
855 !(val
& CPG_SIPLL5_MON_PLL5_LOCK
), 100, 250000);
857 dev_err(priv
->dev
, "failed to release pll5 lock");
861 /* Output clock setting 1 */
862 writel((params
.pl5_postdiv1
<< 0) | (params
.pl5_postdiv2
<< 4) |
863 (params
.pl5_refdiv
<< 8), priv
->base
+ CPG_SIPLL5_CLK1
);
865 /* Output clock setting, SSCG modulation value setting 3 */
866 writel((params
.pl5_fracin
<< 8), priv
->base
+ CPG_SIPLL5_CLK3
);
868 /* Output clock setting 4 */
869 writel(CPG_SIPLL5_CLK4_RESV_LSB
| (params
.pl5_intin
<< 16),
870 priv
->base
+ CPG_SIPLL5_CLK4
);
872 /* Output clock setting 5 */
873 writel(params
.pl5_spread
, priv
->base
+ CPG_SIPLL5_CLK5
);
875 /* PLL normal mode setting */
876 writel(CPG_SIPLL5_STBY_DOWNSPREAD_WEN
| CPG_SIPLL5_STBY_SSCG_EN_WEN
|
877 CPG_SIPLL5_STBY_RESETB_WEN
| CPG_SIPLL5_STBY_RESETB
,
878 priv
->base
+ CPG_SIPLL5_STBY
);
880 /* PLL normal mode transition, output clock stability check */
881 ret
= readl_poll_timeout(priv
->base
+ CPG_SIPLL5_MON
, val
,
882 (val
& CPG_SIPLL5_MON_PLL5_LOCK
), 100, 250000);
884 dev_err(priv
->dev
, "failed to lock pll5");
891 static const struct clk_ops rzg2l_cpg_sipll5_ops
= {
892 .recalc_rate
= rzg2l_cpg_sipll5_recalc_rate
,
893 .round_rate
= rzg2l_cpg_sipll5_round_rate
,
894 .set_rate
= rzg2l_cpg_sipll5_set_rate
,
897 static struct clk
* __init
898 rzg2l_cpg_sipll5_register(const struct cpg_core_clk
*core
,
899 struct rzg2l_cpg_priv
*priv
)
901 const struct clk
*parent
;
902 struct clk_init_data init
;
903 const char *parent_name
;
904 struct sipll5
*sipll5
;
905 struct clk_hw
*clk_hw
;
908 parent
= priv
->clks
[core
->parent
];
910 return ERR_CAST(parent
);
912 sipll5
= devm_kzalloc(priv
->dev
, sizeof(*sipll5
), GFP_KERNEL
);
914 return ERR_PTR(-ENOMEM
);
916 init
.name
= core
->name
;
917 parent_name
= __clk_get_name(parent
);
918 init
.ops
= &rzg2l_cpg_sipll5_ops
;
920 init
.parent_names
= &parent_name
;
921 init
.num_parents
= 1;
923 sipll5
->hw
.init
= &init
;
924 sipll5
->conf
= core
->conf
;
927 writel(CPG_SIPLL5_STBY_SSCG_EN_WEN
| CPG_SIPLL5_STBY_RESETB_WEN
|
928 CPG_SIPLL5_STBY_RESETB
, priv
->base
+ CPG_SIPLL5_STBY
);
930 clk_hw
= &sipll5
->hw
;
931 clk_hw
->init
= &init
;
933 ret
= devm_clk_hw_register(priv
->dev
, clk_hw
);
937 priv
->mux_dsi_div_params
.clksrc
= 1; /* Use clk src 1 for DSI */
938 priv
->mux_dsi_div_params
.dsi_div_a
= 1; /* Divided by 2 */
939 priv
->mux_dsi_div_params
.dsi_div_b
= 2; /* Divided by 3 */
949 struct rzg2l_cpg_priv
*priv
;
952 #define to_pll(_hw) container_of(_hw, struct pll_clk, hw)
954 static unsigned long rzg2l_cpg_pll_clk_recalc_rate(struct clk_hw
*hw
,
955 unsigned long parent_rate
)
957 struct pll_clk
*pll_clk
= to_pll(hw
);
958 struct rzg2l_cpg_priv
*priv
= pll_clk
->priv
;
959 unsigned int val1
, val2
;
962 if (pll_clk
->type
!= CLK_TYPE_SAM_PLL
)
965 val1
= readl(priv
->base
+ GET_REG_SAMPLL_CLK1(pll_clk
->conf
));
966 val2
= readl(priv
->base
+ GET_REG_SAMPLL_CLK2(pll_clk
->conf
));
968 rate
= mul_u64_u32_shr(parent_rate
, (MDIV(val1
) << 16) + KDIV(val1
),
971 return DIV_ROUND_CLOSEST_ULL(rate
, PDIV(val1
));
974 static const struct clk_ops rzg2l_cpg_pll_ops
= {
975 .recalc_rate
= rzg2l_cpg_pll_clk_recalc_rate
,
978 static unsigned long rzg3s_cpg_pll_clk_recalc_rate(struct clk_hw
*hw
,
979 unsigned long parent_rate
)
981 struct pll_clk
*pll_clk
= to_pll(hw
);
982 struct rzg2l_cpg_priv
*priv
= pll_clk
->priv
;
983 u32 nir
, nfr
, mr
, pr
, val
;
986 if (pll_clk
->type
!= CLK_TYPE_G3S_PLL
)
989 val
= readl(priv
->base
+ GET_REG_SAMPLL_CLK1(pll_clk
->conf
));
991 pr
= 1 << FIELD_GET(RZG3S_DIV_P
, val
);
992 /* Hardware interprets values higher than 8 as p = 16. */
996 mr
= FIELD_GET(RZG3S_DIV_M
, val
) + 1;
997 nir
= FIELD_GET(RZG3S_DIV_NI
, val
) + 1;
998 nfr
= FIELD_GET(RZG3S_DIV_NF
, val
);
1000 rate
= mul_u64_u32_shr(parent_rate
, 4096 * nir
+ nfr
, 12);
1002 return DIV_ROUND_CLOSEST_ULL(rate
, (mr
* pr
));
1005 static const struct clk_ops rzg3s_cpg_pll_ops
= {
1006 .recalc_rate
= rzg3s_cpg_pll_clk_recalc_rate
,
1009 static struct clk
* __init
1010 rzg2l_cpg_pll_clk_register(const struct cpg_core_clk
*core
,
1011 struct rzg2l_cpg_priv
*priv
,
1012 const struct clk_ops
*ops
)
1014 struct device
*dev
= priv
->dev
;
1015 const struct clk
*parent
;
1016 struct clk_init_data init
;
1017 const char *parent_name
;
1018 struct pll_clk
*pll_clk
;
1021 parent
= priv
->clks
[core
->parent
];
1023 return ERR_CAST(parent
);
1025 pll_clk
= devm_kzalloc(dev
, sizeof(*pll_clk
), GFP_KERNEL
);
1027 return ERR_PTR(-ENOMEM
);
1029 parent_name
= __clk_get_name(parent
);
1030 init
.name
= core
->name
;
1033 init
.parent_names
= &parent_name
;
1034 init
.num_parents
= 1;
1036 pll_clk
->hw
.init
= &init
;
1037 pll_clk
->conf
= core
->conf
;
1038 pll_clk
->base
= priv
->base
;
1039 pll_clk
->priv
= priv
;
1040 pll_clk
->type
= core
->type
;
1042 ret
= devm_clk_hw_register(dev
, &pll_clk
->hw
);
1044 return ERR_PTR(ret
);
1046 return pll_clk
->hw
.clk
;
1050 *rzg2l_cpg_clk_src_twocell_get(struct of_phandle_args
*clkspec
,
1053 unsigned int clkidx
= clkspec
->args
[1];
1054 struct rzg2l_cpg_priv
*priv
= data
;
1055 struct device
*dev
= priv
->dev
;
1059 switch (clkspec
->args
[0]) {
1062 if (clkidx
> priv
->last_dt_core_clk
) {
1063 dev_err(dev
, "Invalid %s clock index %u\n", type
, clkidx
);
1064 return ERR_PTR(-EINVAL
);
1066 clk
= priv
->clks
[clkidx
];
1071 if (clkidx
>= priv
->num_mod_clks
) {
1072 dev_err(dev
, "Invalid %s clock index %u\n", type
,
1074 return ERR_PTR(-EINVAL
);
1076 clk
= priv
->clks
[priv
->num_core_clks
+ clkidx
];
1080 dev_err(dev
, "Invalid CPG clock type %u\n", clkspec
->args
[0]);
1081 return ERR_PTR(-EINVAL
);
1085 dev_err(dev
, "Cannot get %s clock %u: %ld", type
, clkidx
,
1088 dev_dbg(dev
, "clock (%u, %u) is %pC at %lu Hz\n",
1089 clkspec
->args
[0], clkspec
->args
[1], clk
,
1095 rzg2l_cpg_register_core_clk(const struct cpg_core_clk
*core
,
1096 const struct rzg2l_cpg_info
*info
,
1097 struct rzg2l_cpg_priv
*priv
)
1099 struct clk
*clk
= ERR_PTR(-EOPNOTSUPP
), *parent
;
1100 struct device
*dev
= priv
->dev
;
1101 unsigned int id
= core
->id
, div
= core
->div
;
1102 const char *parent_name
;
1103 struct clk_hw
*clk_hw
;
1105 WARN_DEBUG(id
>= priv
->num_core_clks
);
1106 WARN_DEBUG(PTR_ERR(priv
->clks
[id
]) != -ENOENT
);
1109 /* Skip NULLified clock */
1113 switch (core
->type
) {
1115 clk
= of_clk_get_by_name(priv
->dev
->of_node
, core
->name
);
1118 WARN_DEBUG(core
->parent
>= priv
->num_core_clks
);
1119 parent
= priv
->clks
[core
->parent
];
1120 if (IS_ERR(parent
)) {
1125 parent_name
= __clk_get_name(parent
);
1126 clk_hw
= devm_clk_hw_register_fixed_factor(dev
, core
->name
, parent_name
,
1127 CLK_SET_RATE_PARENT
,
1130 clk
= ERR_CAST(clk_hw
);
1134 case CLK_TYPE_SAM_PLL
:
1135 clk
= rzg2l_cpg_pll_clk_register(core
, priv
, &rzg2l_cpg_pll_ops
);
1137 case CLK_TYPE_G3S_PLL
:
1138 clk
= rzg2l_cpg_pll_clk_register(core
, priv
, &rzg3s_cpg_pll_ops
);
1140 case CLK_TYPE_SIPLL5
:
1141 clk
= rzg2l_cpg_sipll5_register(core
, priv
);
1144 clk
= rzg2l_cpg_div_clk_register(core
, priv
);
1146 case CLK_TYPE_G3S_DIV
:
1147 clk
= rzg3s_cpg_div_clk_register(core
, priv
);
1150 clk
= rzg2l_cpg_mux_clk_register(core
, priv
);
1152 case CLK_TYPE_SD_MUX
:
1153 clk
= rzg2l_cpg_sd_mux_clk_register(core
, priv
);
1155 case CLK_TYPE_PLL5_4_MUX
:
1156 clk
= rzg2l_cpg_pll5_4_mux_clk_register(core
, priv
);
1158 case CLK_TYPE_DSI_DIV
:
1159 clk
= rzg2l_cpg_dsi_div_clk_register(core
, priv
);
1165 if (IS_ERR_OR_NULL(clk
))
1168 dev_dbg(dev
, "Core clock %pC at %lu Hz\n", clk
, clk_get_rate(clk
));
1169 priv
->clks
[id
] = clk
;
1173 dev_err(dev
, "Failed to register %s clock %s: %ld\n", "core",
1174 core
->name
, PTR_ERR(clk
));
1178 * struct mstp_clock - MSTP gating clock
1180 * @hw: handle between common and hardware-specific interfaces
1181 * @off: register offset
1183 * @enabled: soft state of the clock, if it is coupled with another clock
1184 * @priv: CPG/MSTP private data
1185 * @sibling: pointer to the other coupled clock
1192 struct rzg2l_cpg_priv
*priv
;
1193 struct mstp_clock
*sibling
;
1196 #define to_mod_clock(_hw) container_of(_hw, struct mstp_clock, hw)
1198 static int rzg2l_mod_clock_endisable(struct clk_hw
*hw
, bool enable
)
1200 struct mstp_clock
*clock
= to_mod_clock(hw
);
1201 struct rzg2l_cpg_priv
*priv
= clock
->priv
;
1202 unsigned int reg
= clock
->off
;
1203 struct device
*dev
= priv
->dev
;
1204 u32 bitmask
= BIT(clock
->bit
);
1209 dev_dbg(dev
, "%pC does not support ON/OFF\n", hw
->clk
);
1213 dev_dbg(dev
, "CLK_ON 0x%x/%pC %s\n", CLK_ON_R(reg
), hw
->clk
,
1214 enable
? "ON" : "OFF");
1216 value
= bitmask
<< 16;
1220 writel(value
, priv
->base
+ CLK_ON_R(reg
));
1225 if (!priv
->info
->has_clk_mon_regs
)
1228 error
= readl_poll_timeout_atomic(priv
->base
+ CLK_MON_R(reg
), value
,
1229 value
& bitmask
, 0, 10);
1231 dev_err(dev
, "Failed to enable CLK_ON %p\n",
1232 priv
->base
+ CLK_ON_R(reg
));
1237 static int rzg2l_mod_clock_enable(struct clk_hw
*hw
)
1239 struct mstp_clock
*clock
= to_mod_clock(hw
);
1241 if (clock
->sibling
) {
1242 struct rzg2l_cpg_priv
*priv
= clock
->priv
;
1243 unsigned long flags
;
1246 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
1247 enabled
= clock
->sibling
->enabled
;
1248 clock
->enabled
= true;
1249 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
1254 return rzg2l_mod_clock_endisable(hw
, true);
1257 static void rzg2l_mod_clock_disable(struct clk_hw
*hw
)
1259 struct mstp_clock
*clock
= to_mod_clock(hw
);
1261 if (clock
->sibling
) {
1262 struct rzg2l_cpg_priv
*priv
= clock
->priv
;
1263 unsigned long flags
;
1266 spin_lock_irqsave(&priv
->rmw_lock
, flags
);
1267 enabled
= clock
->sibling
->enabled
;
1268 clock
->enabled
= false;
1269 spin_unlock_irqrestore(&priv
->rmw_lock
, flags
);
1274 rzg2l_mod_clock_endisable(hw
, false);
/*
 * clk_ops .is_enabled callback for module clocks.
 *
 * Coupled clocks answer from the cached soft state; otherwise the state
 * is read back from CLK_MON (when the SoC has monitor registers) or
 * from the CLK_ON register itself.
 */
static int rzg2l_mod_clock_is_enabled(struct clk_hw *hw)
{
	struct mstp_clock *clock = to_mod_clock(hw);
	struct rzg2l_cpg_priv *priv = clock->priv;
	u32 bitmask = BIT(clock->bit);
	u32 value;

	/* Clocks without ON/OFF control are always considered enabled. */
	if (!clock->off) {
		dev_dbg(priv->dev, "%pC does not support ON/OFF\n", hw->clk);
		return 1;
	}

	/* Coupled clock: the shared bit does not reflect this half alone. */
	if (clock->sibling)
		return clock->enabled;

	if (priv->info->has_clk_mon_regs)
		value = readl(priv->base + CLK_MON_R(clock->off));
	else
		value = readl(priv->base + clock->off);

	return value & bitmask;
}
/* clk_ops for gateable module (MSTP) clocks. */
static const struct clk_ops rzg2l_mod_clock_ops = {
	.enable = rzg2l_mod_clock_enable,
	.disable = rzg2l_mod_clock_disable,
	.is_enabled = rzg2l_mod_clock_is_enabled,
};
/*
 * Find the other module clock sharing the same register offset and bit
 * as @clock (its coupled "sibling"), or NULL if none is registered yet.
 * Scans all registered module clocks, skipping unpopulated slots.
 */
static struct mstp_clock
*rzg2l_mod_clock_get_sibling(struct mstp_clock *clock,
			     struct rzg2l_cpg_priv *priv)
{
	struct clk_hw *hw;
	unsigned int i;

	for (i = 0; i < priv->num_mod_clks; i++) {
		struct mstp_clock *clk;

		/* Slot never registered: skip. */
		if (priv->clks[priv->num_core_clks + i] == ERR_PTR(-ENOENT))
			continue;

		hw = __clk_get_hw(priv->clks[priv->num_core_clks + i]);
		clk = to_mod_clock(hw);
		/* Same control register and same bit => coupled pair. */
		if (clock->off == clk->off && clock->bit == clk->bit)
			return clk;
	}

	return NULL;
}
/*
 * Register one module (MSTP) clock described by @mod.
 *
 * Validates the static clock tables (WARN_DEBUG), resolves the parent
 * clock, allocates and registers a gate clock with rzg2l_mod_clock_ops,
 * and stores the result in priv->clks[id].  Coupled clocks additionally
 * get linked to their sibling so the two halves can share one hardware
 * enable bit.  Errors are logged; the function itself returns nothing.
 */
static void __init
rzg2l_cpg_register_mod_clk(const struct rzg2l_mod_clk *mod,
			   const struct rzg2l_cpg_info *info,
			   struct rzg2l_cpg_priv *priv)
{
	struct mstp_clock *clock = NULL;
	struct device *dev = priv->dev;
	unsigned int id = mod->id;
	struct clk_init_data init;
	struct clk *parent, *clk;
	const char *parent_name;
	unsigned int i;
	int ret;

	/* Sanity checks against the SoC clock tables (debug builds only). */
	WARN_DEBUG(id < priv->num_core_clks);
	WARN_DEBUG(id >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(mod->parent >= priv->num_core_clks + priv->num_mod_clks);
	WARN_DEBUG(PTR_ERR(priv->clks[id]) != -ENOENT);

	if (!mod->name) {
		/* Skip NULLified clock */
		return;
	}

	parent = priv->clks[mod->parent];
	if (IS_ERR(parent)) {
		clk = parent;
		goto fail;
	}

	clock = devm_kzalloc(dev, sizeof(*clock), GFP_KERNEL);
	if (!clock) {
		clk = ERR_PTR(-ENOMEM);
		goto fail;
	}

	init.name = mod->name;
	init.ops = &rzg2l_mod_clock_ops;
	init.flags = CLK_SET_RATE_PARENT;
	/* Critical clocks must never be gated by the framework. */
	for (i = 0; i < info->num_crit_mod_clks; i++)
		if (id == info->crit_mod_clks[i]) {
			dev_dbg(dev, "CPG %s setting CLK_IS_CRITICAL\n",
				mod->name);
			init.flags |= CLK_IS_CRITICAL;
			break;
		}

	parent_name = __clk_get_name(parent);
	init.parent_names = &parent_name;
	init.num_parents = 1;

	clock->off = mod->off;
	clock->bit = mod->bit;
	clock->priv = priv;
	clock->hw.init = &init;

	ret = devm_clk_hw_register(dev, &clock->hw);
	if (ret) {
		clk = ERR_PTR(ret);
		goto fail;
	}

	clk = clock->hw.clk;
	dev_dbg(dev, "Module clock %pC at %lu Hz\n", clk, clk_get_rate(clk));
	priv->clks[id] = clk;

	if (mod->is_coupled) {
		struct mstp_clock *sibling;

		/* Seed the soft state from the current hardware state. */
		clock->enabled = rzg2l_mod_clock_is_enabled(&clock->hw);
		sibling = rzg2l_mod_clock_get_sibling(clock, priv);
		if (sibling) {
			clock->sibling = sibling;
			sibling->sibling = clock;
		}
	}

	return;

fail:
	dev_err(dev, "Failed to register %s clock %s: %ld\n", "module",
		mod->name, PTR_ERR(clk));
}
/* Map an embedded reset_controller_dev back to its rzg2l_cpg_priv. */
#define rcdev_to_priv(x)	container_of(x, struct rzg2l_cpg_priv, rcdev)
/*
 * reset_control_ops .assert callback.
 *
 * Asserts reset @id by writing only the write-enable half (mask << 16)
 * to CLK_RST, i.e. the reset bit itself is written as 0 = asserted.
 * Completion is then confirmed via the monitor register when one is
 * available; otherwise the code waits one RCLK cycle and assumes
 * success.
 */
static int rzg2l_cpg_assert(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = mask << 16;

	dev_dbg(rcdev->dev, "assert id:%ld offset:0x%x\n", id, CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 value & mask, 10, 200);
}
/*
 * reset_control_ops .deassert callback.
 *
 * Deasserts reset @id by writing the reset bit together with its
 * write-enable bit ((mask << 16) | mask) to CLK_RST, then polls the
 * monitor register for the bit to clear — or, lacking a monitor,
 * waits one RCLK cycle.
 */
static int rzg2l_cpg_deassert(struct reset_controller_dev *rcdev,
			      unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int reg = info->resets[id].off;
	u32 mask = BIT(info->resets[id].bit);
	s8 monbit = info->resets[id].monbit;
	u32 value = (mask << 16) | mask;

	dev_dbg(rcdev->dev, "deassert id:%ld offset:0x%x\n", id,
		CLK_RST_R(reg));

	writel(value, priv->base + CLK_RST_R(reg));

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(reg);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		mask = BIT(monbit);
	} else {
		/* Wait for at least one cycle of the RCLK clock (@ ca. 32 kHz) */
		udelay(35);
		return 0;
	}

	return readl_poll_timeout_atomic(priv->base + reg, value,
					 !(value & mask), 10, 200);
}
/*
 * reset_control_ops .reset callback: pulse the reset line by asserting
 * and, if that succeeded, deasserting it again.
 */
static int rzg2l_cpg_reset(struct reset_controller_dev *rcdev,
			   unsigned long id)
{
	int ret = rzg2l_cpg_assert(rcdev, id);

	return ret ? ret : rzg2l_cpg_deassert(rcdev, id);
}
/*
 * reset_control_ops .status callback.
 *
 * Returns 1 if reset @id is asserted, 0 if deasserted, or a negative
 * error when the SoC exposes no way to observe this reset line.
 */
static int rzg2l_cpg_status(struct reset_controller_dev *rcdev,
			    unsigned long id)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	s8 monbit = info->resets[id].monbit;
	unsigned int reg;
	u32 bitmask;

	if (info->has_clk_mon_regs) {
		reg = CLK_MRST_R(info->resets[id].off);
		bitmask = BIT(info->resets[id].bit);
	} else if (monbit >= 0) {
		reg = CPG_RST_MON;
		bitmask = BIT(monbit);
	} else {
		/* No monitor register and no monitor bit: unobservable. */
		return -ENOTSUPP;
	}

	return !!(readl(priv->base + reg) & bitmask);
}
/* Reset controller operations for the CPG reset lines. */
static const struct reset_control_ops rzg2l_cpg_reset_ops = {
	.reset = rzg2l_cpg_reset,
	.assert = rzg2l_cpg_assert,
	.deassert = rzg2l_cpg_deassert,
	.status = rzg2l_cpg_status,
};
/*
 * Translate a one-cell DT reset specifier into a reset index, rejecting
 * out-of-range indices and table entries with no register offset.
 */
static int rzg2l_cpg_reset_xlate(struct reset_controller_dev *rcdev,
				 const struct of_phandle_args *reset_spec)
{
	struct rzg2l_cpg_priv *priv = rcdev_to_priv(rcdev);
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id = reset_spec->args[0];

	/* A zero offset marks a hole in the reset table. */
	if (id >= rcdev->nr_resets || !info->resets[id].off) {
		dev_err(rcdev->dev, "Invalid reset index %u\n", id);
		return -EINVAL;
	}

	return id;
}
/*
 * Populate priv->rcdev and register it as a devres-managed reset
 * controller using a one-cell DT binding.
 */
static int rzg2l_cpg_reset_controller_register(struct rzg2l_cpg_priv *priv)
{
	priv->rcdev.ops = &rzg2l_cpg_reset_ops;
	priv->rcdev.of_node = priv->dev->of_node;
	priv->rcdev.dev = priv->dev;
	priv->rcdev.of_reset_n_cells = 1;
	priv->rcdev.of_xlate = rzg2l_cpg_reset_xlate;
	priv->rcdev.nr_resets = priv->num_resets;

	return devm_reset_controller_register(priv->dev, &priv->rcdev);
}
/*
 * Decide whether a consumer's clock specifier refers to a module clock
 * that should be managed by runtime PM.
 *
 * Only two-cell CPG_MOD specifiers qualify, and clocks listed in the
 * SoC's no_pm_mod_clks[] table are excluded.
 */
static bool rzg2l_cpg_is_pm_clk(struct rzg2l_cpg_priv *priv,
				const struct of_phandle_args *clkspec)
{
	const struct rzg2l_cpg_info *info = priv->info;
	unsigned int id;
	unsigned int i;

	if (clkspec->args_count != 2)
		return false;

	if (clkspec->args[0] != CPG_MOD)
		return false;

	/* Module clock IDs start after all core clocks. */
	id = clkspec->args[1] + info->num_total_core_clks;
	for (i = 0; i < info->num_no_pm_mod_clks; i++) {
		if (info->no_pm_mod_clks[i] == id)
			return false;
	}

	return true;
}
/**
 * struct rzg2l_cpg_pm_domains - RZ/G2L PM domains data structure
 * @onecell_data: cell data
 * @domains: generic PM domains
 */
struct rzg2l_cpg_pm_domains {
	struct genpd_onecell_data onecell_data;
	/* Flexible array backing onecell_data.domains. */
	struct generic_pm_domain *domains[];
};
/**
 * struct rzg2l_cpg_pd - RZ/G2L power domain data structure
 * @genpd: generic PM domain
 * @priv: pointer to CPG private data structure
 * @conf: CPG PM domain configuration info
 * @id: RZ/G2L power domain ID
 */
struct rzg2l_cpg_pd {
	struct generic_pm_domain genpd;
	struct rzg2l_cpg_priv *priv;
	struct rzg2l_cpg_pm_domain_conf conf;
	u16 id;
};
/*
 * genpd .attach_dev callback.
 *
 * Walks the consumer's "clocks" property and registers every PM-managed
 * CPG module clock with the pm_clk framework.  The pm_clk list is
 * created lazily on the first qualifying clock.  On failure the partial
 * state is unwound via goto cleanup.
 */
static int rzg2l_cpg_attach_dev(struct generic_pm_domain *domain, struct device *dev)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_priv *priv = pd->priv;
	struct device_node *np = dev->of_node;
	struct of_phandle_args clkspec;
	bool once = true;
	struct clk *clk;
	int error;
	int i = 0;

	while (!of_parse_phandle_with_args(np, "clocks", "#clock-cells", i,
					   &clkspec)) {
		if (rzg2l_cpg_is_pm_clk(priv, &clkspec)) {
			/* Create the pm_clk list only once per device. */
			if (once) {
				once = false;
				error = pm_clk_create(dev);
				if (error) {
					of_node_put(clkspec.np);
					goto err;
				}
			}
			clk = of_clk_get_from_provider(&clkspec);
			of_node_put(clkspec.np);
			if (IS_ERR(clk)) {
				error = PTR_ERR(clk);
				goto fail_destroy;
			}

			error = pm_clk_add_clk(dev, clk);
			if (error) {
				dev_err(dev, "pm_clk_add_clk failed %d\n",
					error);
				goto fail_put;
			}
		} else {
			of_node_put(clkspec.np);
		}
		i++;
	}

	return 0;

fail_put:
	clk_put(clk);

fail_destroy:
	pm_clk_destroy(dev);
err:
	return error;
}
/*
 * genpd .detach_dev callback: tear down the pm_clk list, but only when
 * attach actually populated one for this device.
 */
static void rzg2l_cpg_detach_dev(struct generic_pm_domain *unused, struct device *dev)
{
	if (pm_clk_no_clocks(dev))
		return;

	pm_clk_destroy(dev);
}
/* devres action: remove every genpd registered in a onecell table. */
static void rzg2l_cpg_genpd_remove(void *data)
{
	struct genpd_onecell_data *celldata = data;

	for (unsigned int i = 0; i < celldata->num_domains; i++)
		pm_genpd_remove(celldata->domains[i]);
}
/* devres action: remove a single genpd (clock-domain-only setup). */
static void rzg2l_cpg_genpd_remove_simple(void *data)
{
	pm_genpd_remove(data);
}
/*
 * genpd .power_on callback: clear the domain's MSTOP bits by writing
 * only the write-enable halfword (mask << 16), releasing module stop.
 */
static int rzg2l_cpg_power_on(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask << 16, priv->base + mstop.off);

	return 0;
}
/*
 * genpd .power_off callback: set the domain's MSTOP bits by writing
 * mask | (mask << 16), i.e. both the value and its write-enable bits.
 */
static int rzg2l_cpg_power_off(struct generic_pm_domain *domain)
{
	struct rzg2l_cpg_pd *pd = container_of(domain, struct rzg2l_cpg_pd, genpd);
	struct rzg2l_cpg_reg_conf mstop = pd->conf.mstop;
	struct rzg2l_cpg_priv *priv = pd->priv;

	/* Set MSTOP. */
	if (mstop.mask)
		writel(mstop.mask | (mstop.mask << 16), priv->base + mstop.off);

	return 0;
}
/*
 * Common genpd initialization for one CPG power domain.
 *
 * Picks the governor based on GENPD_FLAG_ALWAYS_ON, wires up the
 * attach/detach and power callbacks, initializes the genpd (always-on
 * domains start powered), and powers the domain on when it may never
 * be turned off.
 */
static int __init rzg2l_cpg_pd_setup(struct rzg2l_cpg_pd *pd)
{
	bool always_on = !!(pd->genpd.flags & GENPD_FLAG_ALWAYS_ON);
	struct dev_power_governor *governor;
	int ret;

	if (always_on)
		governor = &pm_domain_always_on_gov;
	else
		governor = &simple_qos_governor;

	pd->genpd.flags |= GENPD_FLAG_PM_CLK | GENPD_FLAG_ACTIVE_WAKEUP;
	pd->genpd.attach_dev = rzg2l_cpg_attach_dev;
	pd->genpd.detach_dev = rzg2l_cpg_detach_dev;
	pd->genpd.power_on = rzg2l_cpg_power_on;
	pd->genpd.power_off = rzg2l_cpg_power_off;

	/* Always-on domains are initialized in the "on" state. */
	ret = pm_genpd_init(&pd->genpd, governor, !always_on);
	if (ret)
		return ret;

	if (always_on)
		ret = rzg2l_cpg_power_on(&pd->genpd);

	return ret;
}
/*
 * Register a single always-on clock domain named after the CPG node.
 * Used when the DT declares "#power-domain-cells = <0>" (legacy
 * bindings without per-domain power control).
 */
static int __init rzg2l_cpg_add_clk_domain(struct rzg2l_cpg_priv *priv)
{
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pd *pd;
	int ret;

	pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return -ENOMEM;

	pd->genpd.name = np->name;
	pd->genpd.flags = GENPD_FLAG_ALWAYS_ON;
	pd->priv = priv;
	ret = rzg2l_cpg_pd_setup(pd);
	if (ret)
		return ret;

	/* Ensure the genpd is removed automatically on driver teardown. */
	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove_simple, &pd->genpd);
	if (ret)
		return ret;

	return of_genpd_add_provider_simple(np, &pd->genpd);
}
/*
 * Custom genpd xlate: map a one-cell power-domain specifier to the
 * genpd whose rzg2l_cpg_pd::id matches, since domain IDs are not
 * guaranteed to equal their array index.
 */
static struct generic_pm_domain *
rzg2l_cpg_pm_domain_xlate(const struct of_phandle_args *spec, void *data)
{
	struct generic_pm_domain *domain = ERR_PTR(-ENOENT);
	struct genpd_onecell_data *genpd = data;

	if (spec->args_count != 1)
		return ERR_PTR(-EINVAL);

	for (unsigned int i = 0; i < genpd->num_domains; i++) {
		struct rzg2l_cpg_pd *pd = container_of(genpd->domains[i], struct rzg2l_cpg_pd,
						       genpd);

		if (pd->id == spec->args[0]) {
			domain = &pd->genpd;
			break;
		}
	}

	return domain;
}
/*
 * Register all PM domains described by info->pm_domains[].
 *
 * DTs with "#power-domain-cells = <0>" fall back to the single
 * always-on clock domain.  Otherwise each table entry becomes a genpd;
 * the first entry acts as parent and every later domain is made its
 * subdomain, then the whole set is exposed via a onecell provider with
 * a custom xlate.
 */
static int __init rzg2l_cpg_add_pm_domains(struct rzg2l_cpg_priv *priv)
{
	const struct rzg2l_cpg_info *info = priv->info;
	struct device *dev = priv->dev;
	struct device_node *np = dev->of_node;
	struct rzg2l_cpg_pm_domains *domains;
	struct generic_pm_domain *parent;
	u32 ncells;
	int ret;

	ret = of_property_read_u32(np, "#power-domain-cells", &ncells);
	if (ret)
		return ret;

	/* For backward compatibility. */
	if (!ncells)
		return rzg2l_cpg_add_clk_domain(priv);

	domains = devm_kzalloc(dev, struct_size(domains, domains, info->num_pm_domains),
			       GFP_KERNEL);
	if (!domains)
		return -ENOMEM;

	domains->onecell_data.domains = domains->domains;
	domains->onecell_data.num_domains = info->num_pm_domains;
	domains->onecell_data.xlate = rzg2l_cpg_pm_domain_xlate;

	/* Registered before the loop so partially-built sets get removed. */
	ret = devm_add_action_or_reset(dev, rzg2l_cpg_genpd_remove, &domains->onecell_data);
	if (ret)
		return ret;

	for (unsigned int i = 0; i < info->num_pm_domains; i++) {
		struct rzg2l_cpg_pd *pd;

		pd = devm_kzalloc(dev, sizeof(*pd), GFP_KERNEL);
		if (!pd)
			return -ENOMEM;

		pd->genpd.name = info->pm_domains[i].name;
		pd->genpd.flags = info->pm_domains[i].genpd_flags;
		pd->conf = info->pm_domains[i].conf;
		pd->id = info->pm_domains[i].id;
		pd->priv = priv;

		ret = rzg2l_cpg_pd_setup(pd);
		if (ret)
			return ret;

		domains->domains[i] = &pd->genpd;
		/* Parent should be on the very first entry of info->pm_domains[]. */
		if (!i) {
			parent = &pd->genpd;
			continue;
		}

		ret = pm_genpd_add_subdomain(parent, &pd->genpd);
		if (ret)
			return ret;
	}

	ret = of_genpd_add_provider_onecell(np, &domains->onecell_data);
	if (ret)
		return ret;

	return 0;
}
/*
 * Platform-driver probe.
 *
 * Allocates the driver state, maps the CPG register block, registers
 * all core and module clocks from the SoC match data, and then adds
 * the clock provider, PM domains, and reset controller.  All resources
 * are devres-managed except the clock provider, which gets an explicit
 * removal action.
 */
static int __init rzg2l_cpg_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *np = dev->of_node;
	const struct rzg2l_cpg_info *info;
	struct rzg2l_cpg_priv *priv;
	unsigned int nclks, i;
	struct clk **clks;
	int error;

	info = of_device_get_match_data(dev);

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->info = info;
	spin_lock_init(&priv->rmw_lock);

	priv->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->base))
		return PTR_ERR(priv->base);

	/* One slot per possible core and hardware module clock. */
	nclks = info->num_total_core_clks + info->num_hw_mod_clks;
	clks = devm_kmalloc_array(dev, nclks, sizeof(*clks), GFP_KERNEL);
	if (!clks)
		return -ENOMEM;

	dev_set_drvdata(dev, priv);
	priv->clks = clks;
	priv->num_core_clks = info->num_total_core_clks;
	priv->num_mod_clks = info->num_hw_mod_clks;
	priv->num_resets = info->num_resets;
	priv->last_dt_core_clk = info->last_dt_core_clk;

	/* Mark every slot empty so lookups of unregistered clocks fail. */
	for (i = 0; i < nclks; i++)
		clks[i] = ERR_PTR(-ENOENT);

	for (i = 0; i < info->num_core_clks; i++)
		rzg2l_cpg_register_core_clk(&info->core_clks[i], info, priv);

	for (i = 0; i < info->num_mod_clks; i++)
		rzg2l_cpg_register_mod_clk(&info->mod_clks[i], info, priv);

	error = of_clk_add_provider(np, rzg2l_cpg_clk_src_twocell_get, priv);
	if (error)
		return error;

	error = devm_add_action_or_reset(dev, rzg2l_cpg_del_clk_provider, np);
	if (error)
		return error;

	error = rzg2l_cpg_add_pm_domains(priv);
	if (error)
		return error;

	error = rzg2l_cpg_reset_controller_register(priv);
	if (error)
		return error;

	return 0;
}
/*
 * DT match table; each entry is compiled in only when the corresponding
 * SoC support is enabled, keeping unused clock tables out of the image.
 */
static const struct of_device_id rzg2l_cpg_match[] = {
#ifdef CONFIG_CLK_R9A07G043
	{
		.compatible = "renesas,r9a07g043-cpg",
		.data = &r9a07g043_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G044
	{
		.compatible = "renesas,r9a07g044-cpg",
		.data = &r9a07g044_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A07G054
	{
		.compatible = "renesas,r9a07g054-cpg",
		.data = &r9a07g054_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A08G045
	{
		.compatible = "renesas,r9a08g045-cpg",
		.data = &r9a08g045_cpg_info,
	},
#endif
#ifdef CONFIG_CLK_R9A09G011
	{
		.compatible = "renesas,r9a09g011-cpg",
		.data = &r9a09g011_cpg_info,
	},
#endif
	{ /* sentinel */ }
};
/* Platform driver; probe is supplied via platform_driver_probe(). */
static struct platform_driver rzg2l_cpg_driver = {
	.driver		= {
		.name	= "rzg2l-cpg",
		.of_match_table	= rzg2l_cpg_match,
	},
};
/* Module entry point: bind the (non-removable) CPG platform driver. */
static int __init rzg2l_cpg_init(void)
{
	return platform_driver_probe(&rzg2l_cpg_driver, rzg2l_cpg_probe);
}
/* Early init: clocks must be available before most other subsystems. */
subsys_initcall(rzg2l_cpg_init);

MODULE_DESCRIPTION("Renesas RZ/G2L CPG Driver");