// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics 2017
 * Author: Gabriel Fernandez <gabriel.fernandez@st.com> for STMicroelectronics.
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>

#include <dt-bindings/clock/stm32h7-clks.h>
/* Reset Clock Control Registers */
#define RCC_D1CFGR	0x18
#define RCC_D2CFGR	0x1C
#define RCC_D3CFGR	0x20
#define RCC_PLLCKSELR	0x28
#define RCC_PLLCFGR	0x2C
#define RCC_PLL1DIVR	0x30
#define RCC_PLL1FRACR	0x34
#define RCC_PLL2DIVR	0x38
#define RCC_PLL2FRACR	0x3C
#define RCC_PLL3DIVR	0x40
#define RCC_PLL3FRACR	0x44
#define RCC_D1CCIPR	0x4C
#define RCC_D2CCIP1R	0x50
#define RCC_D2CCIP2R	0x54
#define RCC_D3CCIPR	0x58

#define RCC_AHB3ENR	0xD4
#define RCC_AHB1ENR	0xD8
#define RCC_AHB2ENR	0xDC
#define RCC_AHB4ENR	0xE0
#define RCC_APB3ENR	0xE4
#define RCC_APB1LENR	0xE8
#define RCC_APB1HENR	0xEC
#define RCC_APB2ENR	0xF0
#define RCC_APB4ENR	0xF4
static DEFINE_SPINLOCK(stm32rcc_lock);

static void __iomem *base;
static struct clk_hw **hws;
/* System clock parent */
static const char * const sys_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_p" };

static const char * const tracein_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "pll1_r" };

static const char * const per_src[] = {
	"hsi_ker", "csi_ker", "hse_ck", "disabled" };

static const char * const pll_src[] = {
	"hsi_ck", "csi_ck", "hse_ck", "no clock" };

static const char * const sdmmc_src[] = { "pll1_q", "pll2_r" };

static const char * const dsi_src[] = { "ck_dsi_phy", "pll2_q" };

static const char * const qspi_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

static const char * const fmc_src[] = {
	"hclk", "pll1_q", "pll2_r", "per_ck" };

/* Kernel clock parent */
static const char * const swp_src[] = { "pclk1", "hsi_ker" };

static const char * const fdcan_src[] = { "hse_ck", "pll1_q", "pll2_q" };

static const char * const dfsdm1_src[] = { "pclk2", "sys_ck" };

static const char * const spdifrx_src[] = {
	"pll1_q", "pll2_r", "pll3_r", "hsi_ker" };

/* Entry 3 is filled at init time with the external I2S clock parent from DT */
static const char *spi_src1[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const spi_src2[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const spi_src3[] = {
	"pclk4", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "hse_ck" };

static const char * const lptim_src1[] = {
	"pclk1", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const lptim_src2[] = {
	"pclk4", "pll2_p", "pll3_r", "lse_ck", "lsi_ck", "per_ck" };

static const char * const cec_src[] = { "lse_ck", "lsi_ck", "csi_ker_div122" };

static const char * const usbotg_src[] = { "pll1_q", "pll3_q", "rc48_ck" };

static const char * const i2c_src1[] = {
	"pclk1", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const i2c_src2[] = {
	"pclk4", "pll3_r", "hsi_ker", "csi_ker" };

static const char * const rng_src[] = {
	"rc48_ck", "pll1_q", "lse_ck", "lsi_ck" };

static const char * const usart_src1[] = {
	"pclk2", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* usart 2,3,4,5,7,8 src */
static const char * const usart_src2[] = {
	"pclk1", "pll2_q", "pll3_q", "hsi_ker", "csi_ker", "lse_ck" };

/* Entry 3 is filled at init time with the external I2S clock parent from DT */
static const char *sai_src[5] = {
	"pll1_q", "pll2_p", "pll3_p", NULL, "per_ck" };

static const char * const adc_src[] = { "pll2_p", "pll3_r", "per_ck" };
/* lpuart1 src */
static const char * const lpuart1_src[] = {
	"pclk3", "pll2_q", "pll3_q", "csi_ker", "lse_ck" };

static const char * const hrtim_src[] = { "tim2_ker", "d1cpre" };

/* RTC clock parent */
static const char * const rtc_src[] = { "off", "lse_ck", "lsi_ck", "hse_1M" };

/* Micro-controller output clock parent */
static const char * const mco_src1[] = {
	"hsi_ck", "lse_ck", "hse_ck", "pll1_q", "rc48_ck" };

static const char * const mco_src2[] = {
	"sys_ck", "pll2_p", "hse_ck", "pll1_p", "csi_ck", "lsi_ck" };

static const char * const ltdc_src[] = { "pll3_r" };
/* Gate clock with ready bit and backup domain management */
struct stm32_ready_gate {
	struct clk_gate gate;
	u8 bit_rdy;
};

#define to_ready_gate_clk(_rgate) container_of(_rgate, struct stm32_ready_gate,\
		gate)

#define RGATE_TIMEOUT 10000
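/*
 * A ready gate pairs an enable bit (bit_idx, handled by the generic clk_gate)
 * with a hardware "ready" flag (bit_rdy) in the same register: after flipping
 * the enable bit, the ops below poll the ready bit and give up after
 * RGATE_TIMEOUT iterations.
 */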
static int ready_gate_clk_enable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
	int bit_status;
	unsigned int timeout = RGATE_TIMEOUT;

	if (clk_gate_ops.is_enabled(hw))
		return 0;

	clk_gate_ops.enable(hw);

	/*
	 * We can't use readl_poll_timeout() because we can be blocked if
	 * someone enables this clock before the clocksource changes.
	 * Only the jiffies counter is available. Jiffies are incremented by
	 * interrupts and the enable op does not allow being interrupted.
	 */
	do {
		bit_status = !(readl(gate->reg) & BIT(rgate->bit_rdy));
	} while (bit_status && --timeout);

	return bit_status;
}
static void ready_gate_clk_disable(struct clk_hw *hw)
{
	struct clk_gate *gate = to_clk_gate(hw);
	struct stm32_ready_gate *rgate = to_ready_gate_clk(gate);
	int bit_status;
	unsigned int timeout = RGATE_TIMEOUT;

	if (!clk_gate_ops.is_enabled(hw))
		return;

	clk_gate_ops.disable(hw);

	/* Wait for the ready bit to clear, with the same timeout bound */
	do {
		bit_status = !!(readl(gate->reg) & BIT(rgate->bit_rdy));
	} while (bit_status && --timeout);
}
static const struct clk_ops ready_gate_clk_ops = {
	.enable		= ready_gate_clk_enable,
	.disable	= ready_gate_clk_disable,
	.is_enabled	= clk_gate_is_enabled,
};
static struct clk_hw *clk_register_ready_gate(struct device *dev,
		const char *name, const char *parent_name,
		void __iomem *reg, u8 bit_idx, u8 bit_rdy,
		unsigned long flags, spinlock_t *lock)
{
	struct stm32_ready_gate *rgate;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;

	rgate = kzalloc(sizeof(*rgate), GFP_KERNEL);
	if (!rgate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &ready_gate_clk_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	rgate->bit_rdy = bit_rdy;
	rgate->gate.lock = lock;
	rgate->gate.reg = reg;
	rgate->gate.bit_idx = bit_idx;
	rgate->gate.hw.init = &init;

	hw = &rgate->gate.hw;
	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(rgate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
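/*
 * The composite descriptors below reference two small helper structs that are
 * not shown in this excerpt. A minimal sketch, with the field order inferred
 * from the positional initializers used in the macros further down; the exact
 * integer types are an assumption of this sketch:
 */
struct gate_cfg {
	u32 offset;
	u8 bit_idx;
};

struct muxdiv_cfg {
	u32 offset;
	u8 shift;
	u8 width;
};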
struct composite_clk_cfg {
	struct gate_cfg *gate;
	struct muxdiv_cfg *mux;
	struct muxdiv_cfg *div;
	const char *name;
	const char * const *parent_name;
	int num_parents;
	u32 flags;
};
struct composite_clk_gcfg_t {
	u32 flags;
	const struct clk_ops *ops;
};
/*
 * General config definition of a composite clock (only a clock divider
 * for the rate)
 */
struct composite_clk_gcfg {
	struct composite_clk_gcfg_t *mux;
	struct composite_clk_gcfg_t *div;
	struct composite_clk_gcfg_t *gate;
};
#define M_CFG_MUX(_mux_ops, _mux_flags)\
	.mux = &(struct composite_clk_gcfg_t) { _mux_flags, _mux_ops }

#define M_CFG_DIV(_rate_ops, _rate_flags)\
	.div = &(struct composite_clk_gcfg_t) { _rate_flags, _rate_ops }

#define M_CFG_GATE(_gate_ops, _gate_flags)\
	.gate = &(struct composite_clk_gcfg_t) { _gate_flags, _gate_ops }
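/*
 * Each M_CFG_* macro fills one member of a composite_clk_gcfg with the
 * hardware flags and the (optionally overridden) clk_ops to use. For example
 * M_CFG_GATE(NULL, 0) expands to
 * ".gate = &(struct composite_clk_gcfg_t){ 0, NULL }", which means "use the
 * default gate ops with no extra flags".
 */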
static struct clk_mux *_get_cmux(void __iomem *reg, u8 shift, u8 width,
		u32 flags, spinlock_t *lock)
{
	struct clk_mux *mux;

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return ERR_PTR(-ENOMEM);

	mux->reg = reg;
	mux->shift = shift;
	mux->mask = (1 << width) - 1;
	mux->flags = flags;
	mux->lock = lock;

	return mux;
}
static struct clk_divider *_get_cdiv(void __iomem *reg, u8 shift, u8 width,
		u32 flags, spinlock_t *lock)
{
	struct clk_divider *div;

	div = kzalloc(sizeof(*div), GFP_KERNEL);
	if (!div)
		return ERR_PTR(-ENOMEM);

	div->reg = reg;
	div->shift = shift;
	div->width = width;
	div->flags = flags;
	div->lock = lock;

	return div;
}
static struct clk_gate *_get_cgate(void __iomem *reg, u8 bit_idx, u32 flags,
		spinlock_t *lock)
{
	struct clk_gate *gate;

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->reg = reg;
	gate->bit_idx = bit_idx;
	gate->flags = flags;
	gate->lock = lock;

	return gate;
}
struct composite_cfg {
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;

	const struct clk_ops *mux_ops;
	const struct clk_ops *div_ops;
	const struct clk_ops *gate_ops;
};
static void get_cfg_composite_div(const struct composite_clk_gcfg *gcfg,
		const struct composite_clk_cfg *cfg,
		struct composite_cfg *composite, spinlock_t *lock)
{
	struct clk_mux *mux = NULL;
	struct clk_divider *div = NULL;
	struct clk_gate *gate = NULL;
	const struct clk_ops *mux_ops, *div_ops, *gate_ops;
	struct clk_hw *mux_hw;
	struct clk_hw *div_hw;
	struct clk_hw *gate_hw;

	mux_ops = div_ops = gate_ops = NULL;
	mux_hw = div_hw = gate_hw = NULL;

	if (gcfg->mux && cfg->mux) {
		mux = _get_cmux(base + cfg->mux->offset,
				cfg->mux->shift,
				cfg->mux->width,
				gcfg->mux->flags, lock);

		if (!IS_ERR(mux)) {
			mux_hw = &mux->hw;
			mux_ops = gcfg->mux->ops ?
				  gcfg->mux->ops : &clk_mux_ops;
		}
	}

	if (gcfg->div && cfg->div) {
		div = _get_cdiv(base + cfg->div->offset,
				cfg->div->shift,
				cfg->div->width,
				gcfg->div->flags, lock);

		if (!IS_ERR(div)) {
			div_hw = &div->hw;
			div_ops = gcfg->div->ops ?
				  gcfg->div->ops : &clk_divider_ops;
		}
	}

	if (gcfg->gate && cfg->gate) {
		gate = _get_cgate(base + cfg->gate->offset,
				  cfg->gate->bit_idx,
				  gcfg->gate->flags, lock);

		if (!IS_ERR(gate)) {
			gate_hw = &gate->hw;
			gate_ops = gcfg->gate->ops ?
				   gcfg->gate->ops : &clk_gate_ops;
		}
	}

	composite->mux_hw = mux_hw;
	composite->mux_ops = mux_ops;

	composite->div_hw = div_hw;
	composite->div_ops = div_ops;

	composite->gate_hw = gate_hw;
	composite->gate_ops = gate_ops;
}
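/*
 * Per-timer kernel clock wrapper. The struct definition is not part of this
 * excerpt; the sketch below only declares the members that the code in this
 * file actually dereferences (hw, lock and dppre_shift).
 */
struct timer_ker {
	u8 dppre_shift;
	struct clk_hw hw;
	spinlock_t *lock;
};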
#define to_timer_ker(_hw) container_of(_hw, struct timer_ker, hw)

static unsigned long timer_ker_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct timer_ker *clk_elem = to_timer_ker(hw);
	u32 timpre;
	u32 dppre_shift = clk_elem->dppre_shift;
	u32 prescaler;
	u32 mul;

	timpre = (readl(base + RCC_CFGR) >> 15) & 0x01;

	prescaler = (readl(base + RCC_D2CFGR) >> dppre_shift) & 0x07;

	mul = 2;

	if (prescaler < 4)
		mul = 1;

	else if (timpre && prescaler > 4)
		mul = 4;

	return parent_rate * mul;
}

static const struct clk_ops timer_ker_ops = {
	.recalc_rate = timer_ker_recalc_rate,
};
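/*
 * D2 domain timer kernel clocks follow the usual STM32 rule: with the APB
 * prescaler at /1 the timer clock equals the APB clock; once the APB
 * prescaler divides, the timers run at twice the APB clock, and with the
 * TIMPRE bit set they run at four times the APB clock when the APB prescaler
 * is /4 or more. This is the rule timer_ker_recalc_rate() implements.
 */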
static struct clk_hw *clk_register_stm32_timer_ker(struct device *dev,
		const char *name, const char *parent_name,
		unsigned long flags, u8 dppre_shift,
		spinlock_t *lock)
{
	struct timer_ker *element;
	struct clk_init_data init;
	struct clk_hw *hw;
	int err;

	element = kzalloc(sizeof(*element), GFP_KERNEL);
	if (!element)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &timer_ker_ops;
	init.flags = flags;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	element->hw.init = &init;
	element->lock = lock;
	element->dppre_shift = dppre_shift;

	hw = &element->hw;
	err = clk_hw_register(dev, hw);
	if (err) {
		kfree(element);
		return ERR_PTR(err);
	}

	return hw;
}
static const struct clk_div_table d1cpre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
	{ 4, 1 }, { 5, 1 }, { 6, 1 }, { 7, 1 },
	{ 8, 2 }, { 9, 4 }, { 10, 8 }, { 11, 16 },
	{ 12, 64 }, { 13, 128 }, { 14, 256 },
	{ 0 },
};

static const struct clk_div_table ppre_div_table[] = {
	{ 0, 1 }, { 1, 1 }, { 2, 1 }, { 3, 1 },
	{ 4, 2 }, { 5, 4 }, { 6, 8 }, { 7, 16 },
	{ 0 },
};
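/*
 * In both tables the top bit of the register field selects whether the
 * prescaler divides at all: values with the top bit clear mean "divide by 1",
 * and values with it set select increasing powers of two (d1cpre skips /32,
 * jumping from /16 to /64). The trailing { 0 } entry is the sentinel required
 * by clk_hw_register_divider_table().
 */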
static void register_core_and_bus_clocks(void)
{
	/* CPU clock: sys_ck divided by D1CPRE */
	hws[SYS_D1CPRE] = clk_hw_register_divider_table(NULL, "d1cpre",
			"sys_ck", CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 8, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	hws[HCLK] = clk_hw_register_divider_table(NULL, "hclk", "d1cpre",
			CLK_IGNORE_UNUSED, base + RCC_D1CFGR, 0, 4, 0,
			d1cpre_div_table, &stm32rcc_lock);

	/* Cortex-M7 systick reference: d1cpre / 8 */
	hws[CPU_SYSTICK] = clk_hw_register_fixed_factor(NULL, "systick",
			"d1cpre", 0, 1, 8);

	/* APB3 peripheral clock */
	hws[PCLK3] = clk_hw_register_divider_table(NULL, "pclk3", "hclk", 0,
			base + RCC_D1CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* APB1 peripheral clock */
	hws[PCLK1] = clk_hw_register_divider_table(NULL, "pclk1", "hclk", 0,
			base + RCC_D2CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);

	/* Timers prescaler clocks */
	clk_register_stm32_timer_ker(NULL, "tim1_ker", "pclk1", 0, 4,
			&stm32rcc_lock);

	/* APB2 peripheral clock */
	hws[PCLK2] = clk_hw_register_divider_table(NULL, "pclk2", "hclk", 0,
			base + RCC_D2CFGR, 8, 3, 0, ppre_div_table,
			&stm32rcc_lock);

	clk_register_stm32_timer_ker(NULL, "tim2_ker", "pclk2", 0, 8,
			&stm32rcc_lock);

	/* APB4 peripheral clock */
	hws[PCLK4] = clk_hw_register_divider_table(NULL, "pclk4", "hclk", 0,
			base + RCC_D3CFGR, 4, 3, 0,
			ppre_div_table, &stm32rcc_lock);
}
/* MUX clock configuration */
struct stm32_mux_clk {
	const char *name;
	const char * const *parents;
	u8 num_parents;
	u32 offset;
	u8 shift;
	u8 width;
	u32 flags;
};

#define M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, _flags)\
{\
	.name = _name,\
	.parents = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.offset = _mux_offset,\
	.shift = _mux_shift,\
	.width = _mux_width,\
	.flags = _flags,\
}

#define M_MCLOC(_name, _parents, _mux_offset, _mux_shift, _mux_width)\
	M_MCLOCF(_name, _parents, _mux_offset, _mux_shift, _mux_width, 0)
static const struct stm32_mux_clk stm32_mclk[] __initconst = {
	M_MCLOC("per_ck", per_src, RCC_D1CCIPR, 28, 3),
	M_MCLOC("pllsrc", pll_src, RCC_PLLCKSELR, 0, 3),
	M_MCLOC("sys_ck", sys_src, RCC_CFGR, 0, 3),
	M_MCLOC("tracein_ck", tracein_src, RCC_CFGR, 0, 3),
};
/* Oscillator clock configuration */
struct stm32_osc_clk {
	const char *name;
	const char *parent;
	u32 gate_offset;
	u8 bit_idx;
	u8 bit_rdy;
	u32 flags;
};

#define OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, _flags)\
{\
	.name = _name,\
	.parent = _parent,\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.bit_rdy = _bit_rdy,\
	.flags = _flags,\
}

#define OSC_CLK(_name, _parent, _gate_offset, _bit_idx, _bit_rdy)\
	OSC_CLKF(_name, _parent, _gate_offset, _bit_idx, _bit_rdy, 0)
static const struct stm32_osc_clk stm32_oclk[] __initconst = {
	OSC_CLKF("hsi_ck", "hsidiv", RCC_CR, 0, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("hsi_ker", "hsidiv", RCC_CR, 1, 2, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ck", "clk-csi", RCC_CR, 7, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("csi_ker", "clk-csi", RCC_CR, 9, 8, CLK_IGNORE_UNUSED),
	OSC_CLKF("rc48_ck", "clk-rc48", RCC_CR, 12, 13, CLK_IGNORE_UNUSED),
	OSC_CLKF("lsi_ck", "clk-lsi", RCC_CSR, 0, 1, CLK_IGNORE_UNUSED),
};
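/*
 * Each entry pairs the oscillator enable bit (bit_idx) with its ready flag
 * (bit_rdy), so the internal oscillators are registered through the
 * ready-gate helper above. CLK_IGNORE_UNUSED keeps the framework from gating
 * them at late init even when no Linux consumer has claimed them.
 */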
/* PLL configuration */
struct st32h7_pll_cfg {
	u8 bit_idx;
	u32 offset_divr;
	u8 bit_frac_en;
	u32 offset_frac;
	u8 divm;
};

struct stm32_pll_data {
	const char *name;
	const char *parent_name;
	unsigned long flags;
	const struct st32h7_pll_cfg *cfg;
};

static const struct st32h7_pll_cfg stm32h7_pll1 = {
	.offset_divr = RCC_PLL1DIVR,
	.offset_frac = RCC_PLL1FRACR,
};

static const struct st32h7_pll_cfg stm32h7_pll2 = {
	.offset_divr = RCC_PLL2DIVR,
	.offset_frac = RCC_PLL2FRACR,
};

static const struct st32h7_pll_cfg stm32h7_pll3 = {
	.offset_divr = RCC_PLL3DIVR,
	.offset_frac = RCC_PLL3FRACR,
};
static const struct stm32_pll_data stm32_pll[] = {
	{ "vco1", "pllsrc", CLK_IGNORE_UNUSED, &stm32h7_pll1 },
	{ "vco2", "pllsrc", 0, &stm32h7_pll2 },
	{ "vco3", "pllsrc", 0, &stm32h7_pll3 },
};
struct stm32_fractional_divider {
	void __iomem	*mreg;
	u8		mshift;
	u8		mwidth;

	void __iomem	*nreg;
	u8		nshift;
	u8		nwidth;

	void __iomem	*freg_status;
	u8		freg_bit;
	void __iomem	*freg_value;
	u8		fshift;
	u8		fwidth;
};
struct stm32_pll_obj {
	struct stm32_fractional_divider div;
	struct stm32_ready_gate rgate;
	struct clk_hw hw;
};

#define to_pll(_hw) container_of(_hw, struct stm32_pll_obj, hw)
static int pll_is_enabled(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	return ready_gate_clk_ops.is_enabled(_hw);
}

static int pll_enable(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	return ready_gate_clk_ops.enable(_hw);
}

static void pll_disable(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct clk_hw *_hw = &clk_elem->rgate.gate.hw;

	__clk_hw_set_clk(_hw, hw);

	ready_gate_clk_ops.disable(_hw);
}
static int pll_frac_is_enabled(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_status) >> fd->freg_bit) & 0x01;
}

static unsigned long pll_read_frac(struct clk_hw *hw)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;

	return (readl(fd->freg_value) >> fd->fshift) &
		GENMASK(fd->fwidth - 1, 0);
}
static unsigned long pll_fd_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct stm32_pll_obj *clk_elem = to_pll(hw);
	struct stm32_fractional_divider *fd = &clk_elem->div;
	unsigned long m, n;
	u32 val, mask;
	u64 rate, rate1 = 0;

	val = readl(fd->mreg);
	mask = GENMASK(fd->mwidth - 1, 0) << fd->mshift;
	m = (val & mask) >> fd->mshift;

	val = readl(fd->nreg);
	mask = GENMASK(fd->nwidth - 1, 0) << fd->nshift;
	n = ((val & mask) >> fd->nshift) + 1;

	if (!m)
		return parent_rate;

	rate = (u64)parent_rate * n;
	do_div(rate, m);

	if (pll_frac_is_enabled(hw)) {
		val = pll_read_frac(hw);
		rate1 = (u64)parent_rate * (u64)val;
		do_div(rate1, (m * 8191));
		rate += rate1;
	}

	return rate;
}
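/*
 * With the fractional part enabled the recalculated VCO rate is
 *
 *   rate = (parent_rate / M) * (N + FRACN / 8191)
 *
 * where M and N come from the integer divider fields read above and FRACN is
 * the raw fractional field; 8191 is the divisor the code above actually uses
 * for the fractional contribution.
 */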
static const struct clk_ops pll_ops = {
	.enable		= pll_enable,
	.disable	= pll_disable,
	.is_enabled	= pll_is_enabled,
	.recalc_rate	= pll_fd_recalc_rate,
};
static struct clk_hw *clk_register_stm32_pll(struct device *dev,
		const char *name,
		const char *parent,
		unsigned long flags,
		const struct st32h7_pll_cfg *cfg,
		spinlock_t *lock)
{
	struct stm32_pll_obj *pll;
	struct clk_init_data init = { NULL };
	struct clk_hw *hw;
	int ret;
	struct stm32_fractional_divider *div = NULL;
	struct stm32_ready_gate *rgate;

	pll = kzalloc(sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &pll_ops;
	init.flags = flags;
	init.parent_names = &parent;
	init.num_parents = 1;
	pll->hw.init = &init;

	hw = &pll->hw;
	rgate = &pll->rgate;

	rgate->bit_rdy = cfg->bit_idx + 1;
	rgate->gate.lock = lock;
	rgate->gate.reg = base + RCC_CR;
	rgate->gate.bit_idx = cfg->bit_idx;

	div = &pll->div;
	div->mreg = base + RCC_PLLCKSELR;
	div->mshift = cfg->divm;
	div->nreg = base + cfg->offset_divr;

	div->freg_status = base + RCC_PLLCFGR;
	div->freg_bit = cfg->bit_frac_en;
	div->freg_value = base + cfg->offset_frac;

	ret = clk_hw_register(dev, hw);
	if (ret) {
		kfree(pll);
		hw = ERR_PTR(ret);
	}

	return hw;
}
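/*
 * PLL output dividers (ODF): the P/Q/R post-dividers and their enable bits
 * are only touched here while the owning PLL is stopped, so the divider and
 * gate ops below snapshot the parent PLL state, disable it around the
 * register update and restore it afterwards.
 */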
static unsigned long odf_divider_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	return clk_divider_ops.recalc_rate(hw, parent_rate);
}

static int odf_divider_determine_rate(struct clk_hw *hw,
		struct clk_rate_request *req)
{
	return clk_divider_ops.determine_rate(hw, req);
}

static int odf_divider_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_hw *hwp;
	int pll_status;
	int ret;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	ret = clk_divider_ops.set_rate(hw, rate, parent_rate);

	if (pll_status)
		pll_enable(hwp);

	return ret;
}

static const struct clk_ops odf_divider_ops = {
	.recalc_rate = odf_divider_recalc_rate,
	.determine_rate = odf_divider_determine_rate,
	.set_rate = odf_divider_set_rate,
};
static int odf_gate_enable(struct clk_hw *hw)
{
	struct clk_hw *hwp;
	int pll_status;
	int ret;

	if (clk_gate_ops.is_enabled(hw))
		return 0;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	ret = clk_gate_ops.enable(hw);

	if (pll_status)
		pll_enable(hwp);

	return ret;
}

static void odf_gate_disable(struct clk_hw *hw)
{
	struct clk_hw *hwp;
	int pll_status;

	if (!clk_gate_ops.is_enabled(hw))
		return;

	hwp = clk_hw_get_parent(hw);

	pll_status = pll_is_enabled(hwp);

	if (pll_status)
		pll_disable(hwp);

	clk_gate_ops.disable(hw);

	if (pll_status)
		pll_enable(hwp);
}

static const struct clk_ops odf_gate_ops = {
	.enable		= odf_gate_enable,
	.disable	= odf_gate_disable,
	.is_enabled	= clk_gate_is_enabled,
};

static struct composite_clk_gcfg odf_clk_gcfg = {
	M_CFG_DIV(&odf_divider_ops, 0),
	M_CFG_GATE(&odf_gate_ops, 0),
};
#define M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
		_rate_shift, _rate_width, _flags)\
{\
	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx },\
	.name = _name,\
	.parent_name = &(const char *) {_parent},\
	.num_parents = 1,\
	.flags = _flags,\
}

#define M_ODF(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
		_rate_shift, _rate_width)\
	M_ODF_F(_name, _parent, _gate_offset, _bit_idx, _rate_offset,\
			_rate_shift, _rate_width, 0)
static const struct composite_clk_cfg stm32_odf[3][3] = {
	{
		M_ODF_F("pll1_p", "vco1", RCC_PLLCFGR, 16, RCC_PLL1DIVR, 9, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_q", "vco1", RCC_PLLCFGR, 17, RCC_PLL1DIVR, 16, 7,
				CLK_IGNORE_UNUSED),
		M_ODF_F("pll1_r", "vco1", RCC_PLLCFGR, 18, RCC_PLL1DIVR, 24, 7,
				CLK_IGNORE_UNUSED),
	},

	{
		M_ODF("pll2_p", "vco2", RCC_PLLCFGR, 19, RCC_PLL2DIVR, 9, 7),
		M_ODF("pll2_q", "vco2", RCC_PLLCFGR, 20, RCC_PLL2DIVR, 16, 7),
		M_ODF("pll2_r", "vco2", RCC_PLLCFGR, 21, RCC_PLL2DIVR, 24, 7),
	},

	{
		M_ODF("pll3_p", "vco3", RCC_PLLCFGR, 22, RCC_PLL3DIVR, 9, 7),
		M_ODF("pll3_q", "vco3", RCC_PLLCFGR, 23, RCC_PLL3DIVR, 16, 7),
		M_ODF("pll3_r", "vco3", RCC_PLLCFGR, 24, RCC_PLL3DIVR, 24, 7),
	}
};
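/*
 * Bus/peripheral gate descriptor. The struct itself is not part of this
 * excerpt; this sketch only carries the members that the PER_CLKF() macro and
 * the registration loop in stm32h7_rcc_init() reference.
 */
struct pclk_t {
	u32 gate_offset;
	u8 bit_idx;
	const char *name;
	const char *parent_name;
	u32 flags;
};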
#define PER_CLKF(_gate_offset, _bit_idx, _name, _parent, _flags)\
{\
	.gate_offset = _gate_offset,\
	.bit_idx = _bit_idx,\
	.name = _name,\
	.parent_name = _parent,\
	.flags = _flags,\
}

#define PER_CLK(_gate_offset, _bit_idx, _name, _parent)\
	PER_CLKF(_gate_offset, _bit_idx, _name, _parent, 0)
static const struct pclk_t pclk[] = {
	PER_CLK(RCC_AHB3ENR, 31, "d1sram1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 30, "itcm", "hclk"),
	PER_CLK(RCC_AHB3ENR, 29, "dtcm2", "hclk"),
	PER_CLK(RCC_AHB3ENR, 28, "dtcm1", "hclk"),
	PER_CLK(RCC_AHB3ENR, 8, "flitf", "hclk"),
	PER_CLK(RCC_AHB3ENR, 5, "jpgdec", "hclk"),
	PER_CLK(RCC_AHB3ENR, 4, "dma2d", "hclk"),
	PER_CLK(RCC_AHB3ENR, 0, "mdma", "hclk"),
	PER_CLK(RCC_AHB1ENR, 28, "usb2ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 26, "usb1ulpi", "hclk"),
	PER_CLK(RCC_AHB1ENR, 17, "eth1rx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 16, "eth1tx", "hclk"),
	PER_CLK(RCC_AHB1ENR, 15, "eth1mac", "hclk"),
	PER_CLK(RCC_AHB1ENR, 14, "art", "hclk"),
	PER_CLK(RCC_AHB1ENR, 1, "dma2", "hclk"),
	PER_CLK(RCC_AHB1ENR, 0, "dma1", "hclk"),
	PER_CLK(RCC_AHB2ENR, 31, "d2sram3", "hclk"),
	PER_CLK(RCC_AHB2ENR, 30, "d2sram2", "hclk"),
	PER_CLK(RCC_AHB2ENR, 29, "d2sram1", "hclk"),
	PER_CLK(RCC_AHB2ENR, 5, "hash", "hclk"),
	PER_CLK(RCC_AHB2ENR, 4, "crypt", "hclk"),
	PER_CLK(RCC_AHB2ENR, 0, "camitf", "hclk"),
	PER_CLK(RCC_AHB4ENR, 28, "bkpram", "hclk"),
	PER_CLK(RCC_AHB4ENR, 25, "hsem", "hclk"),
	PER_CLK(RCC_AHB4ENR, 21, "bdma", "hclk"),
	PER_CLK(RCC_AHB4ENR, 19, "crc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 10, "gpiok", "hclk"),
	PER_CLK(RCC_AHB4ENR, 9, "gpioj", "hclk"),
	PER_CLK(RCC_AHB4ENR, 8, "gpioi", "hclk"),
	PER_CLK(RCC_AHB4ENR, 7, "gpioh", "hclk"),
	PER_CLK(RCC_AHB4ENR, 6, "gpiog", "hclk"),
	PER_CLK(RCC_AHB4ENR, 5, "gpiof", "hclk"),
	PER_CLK(RCC_AHB4ENR, 4, "gpioe", "hclk"),
	PER_CLK(RCC_AHB4ENR, 3, "gpiod", "hclk"),
	PER_CLK(RCC_AHB4ENR, 2, "gpioc", "hclk"),
	PER_CLK(RCC_AHB4ENR, 1, "gpiob", "hclk"),
	PER_CLK(RCC_AHB4ENR, 0, "gpioa", "hclk"),
	PER_CLK(RCC_APB3ENR, 6, "wwdg1", "pclk3"),
	PER_CLK(RCC_APB1LENR, 29, "dac12", "pclk1"),
	PER_CLK(RCC_APB1LENR, 11, "wwdg2", "pclk1"),
	PER_CLK(RCC_APB1LENR, 8, "tim14", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 7, "tim13", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 6, "tim12", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 5, "tim7", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 4, "tim6", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 3, "tim5", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 2, "tim4", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 1, "tim3", "tim1_ker"),
	PER_CLK(RCC_APB1LENR, 0, "tim2", "tim1_ker"),
	PER_CLK(RCC_APB1HENR, 5, "mdios", "pclk1"),
	PER_CLK(RCC_APB1HENR, 4, "opamp", "pclk1"),
	PER_CLK(RCC_APB1HENR, 1, "crs", "pclk1"),
	PER_CLK(RCC_APB2ENR, 18, "tim17", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 17, "tim16", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 16, "tim15", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 1, "tim8", "tim2_ker"),
	PER_CLK(RCC_APB2ENR, 0, "tim1", "tim2_ker"),
	PER_CLK(RCC_APB4ENR, 26, "tmpsens", "pclk4"),
	PER_CLK(RCC_APB4ENR, 16, "rtcapb", "pclk4"),
	PER_CLK(RCC_APB4ENR, 15, "vref", "pclk4"),
	PER_CLK(RCC_APB4ENR, 14, "comp12", "pclk4"),
	PER_CLK(RCC_APB4ENR, 1, "syscfg", "pclk4"),
};
/* Kernel clocks: peripheral enable gate + kernel clock source mux */
#define KER_CLKF(_gate_offset, _bit_idx,\
		_mux_offset, _mux_shift, _mux_width,\
		_name, _parent_name,\
		_flags)\
{\
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.name = _name,\
	.parent_name = _parent_name,\
	.num_parents = ARRAY_SIZE(_parent_name),\
	.flags = _flags,\
}

#define KER_CLK(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
		_name, _parent_name)\
	KER_CLKF(_gate_offset, _bit_idx, _mux_offset, _mux_shift, _mux_width,\
			_name, _parent_name, 0)

#define KER_CLKF_NOMUX(_gate_offset, _bit_idx,\
		_name, _parent_name,\
		_flags)\
{\
	.gate = &(struct gate_cfg) {_gate_offset, _bit_idx},\
	.mux = NULL,\
	.name = _name,\
	.parent_name = _parent_name,\
	.num_parents = ARRAY_SIZE(_parent_name),\
	.flags = _flags,\
}
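/*
 * Each KER_CLK() entry therefore describes one composite clock: the enable
 * bit in an RCC_xxxENR register plus the kernel clock source mux in one of
 * the D1/D2/D3 CCIP registers. For instance the "sdmmc1" entry below gates
 * with RCC_AHB3ENR bit 16 and selects its kernel clock with the 1-bit mux at
 * RCC_D1CCIPR bit 16, choosing between the sdmmc_src parents.
 */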
static const struct composite_clk_cfg kclk[] = {
	KER_CLK(RCC_AHB3ENR, 16, RCC_D1CCIPR, 16, 1, "sdmmc1", sdmmc_src),
	KER_CLKF(RCC_AHB3ENR, 14, RCC_D1CCIPR, 4, 2, "quadspi", qspi_src,
			CLK_IGNORE_UNUSED),
	KER_CLKF(RCC_AHB3ENR, 12, RCC_D1CCIPR, 0, 2, "fmc", fmc_src,
			CLK_IGNORE_UNUSED),
	KER_CLK(RCC_AHB1ENR, 27, RCC_D2CCIP2R, 20, 2, "usb2otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 25, RCC_D2CCIP2R, 20, 2, "usb1otg", usbotg_src),
	KER_CLK(RCC_AHB1ENR, 5, RCC_D3CCIPR, 16, 2, "adc12", adc_src),
	KER_CLK(RCC_AHB2ENR, 9, RCC_D1CCIPR, 16, 1, "sdmmc2", sdmmc_src),
	KER_CLK(RCC_AHB2ENR, 6, RCC_D2CCIP2R, 8, 2, "rng", rng_src),
	KER_CLK(RCC_AHB4ENR, 24, RCC_D3CCIPR, 16, 2, "adc3", adc_src),
	KER_CLKF(RCC_APB3ENR, 4, RCC_D1CCIPR, 8, 1, "dsi", dsi_src,
			CLK_SET_RATE_PARENT),
	KER_CLKF_NOMUX(RCC_APB3ENR, 3, "ltdc", ltdc_src, CLK_SET_RATE_PARENT),
	KER_CLK(RCC_APB1LENR, 31, RCC_D2CCIP2R, 0, 3, "usart8", usart_src2),
	KER_CLK(RCC_APB1LENR, 30, RCC_D2CCIP2R, 0, 3, "usart7", usart_src2),
	KER_CLK(RCC_APB1LENR, 27, RCC_D2CCIP2R, 22, 2, "hdmicec", cec_src),
	KER_CLK(RCC_APB1LENR, 23, RCC_D2CCIP2R, 12, 2, "i2c3", i2c_src1),
	KER_CLK(RCC_APB1LENR, 22, RCC_D2CCIP2R, 12, 2, "i2c2", i2c_src1),
	KER_CLK(RCC_APB1LENR, 21, RCC_D2CCIP2R, 12, 2, "i2c1", i2c_src1),
	KER_CLK(RCC_APB1LENR, 20, RCC_D2CCIP2R, 0, 3, "uart5", usart_src2),
	KER_CLK(RCC_APB1LENR, 19, RCC_D2CCIP2R, 0, 3, "uart4", usart_src2),
	KER_CLK(RCC_APB1LENR, 18, RCC_D2CCIP2R, 0, 3, "usart3", usart_src2),
	KER_CLK(RCC_APB1LENR, 17, RCC_D2CCIP2R, 0, 3, "usart2", usart_src2),
	KER_CLK(RCC_APB1LENR, 16, RCC_D2CCIP1R, 20, 2, "spdifrx", spdifrx_src),
	KER_CLK(RCC_APB1LENR, 15, RCC_D2CCIP1R, 16, 3, "spi3", spi_src1),
	KER_CLK(RCC_APB1LENR, 14, RCC_D2CCIP1R, 16, 3, "spi2", spi_src1),
	KER_CLK(RCC_APB1LENR, 9, RCC_D2CCIP2R, 28, 3, "lptim1", lptim_src1),
	KER_CLK(RCC_APB1HENR, 8, RCC_D2CCIP1R, 28, 2, "fdcan", fdcan_src),
	KER_CLK(RCC_APB1HENR, 2, RCC_D2CCIP1R, 31, 1, "swp", swp_src),
	KER_CLK(RCC_APB2ENR, 29, RCC_CFGR, 14, 1, "hrtim", hrtim_src),
	KER_CLK(RCC_APB2ENR, 28, RCC_D2CCIP1R, 24, 1, "dfsdm1", dfsdm1_src),
	KER_CLKF(RCC_APB2ENR, 24, RCC_D2CCIP1R, 6, 3, "sai3", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 23, RCC_D2CCIP1R, 6, 3, "sai2", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLKF(RCC_APB2ENR, 22, RCC_D2CCIP1R, 0, 3, "sai1", sai_src,
			CLK_SET_RATE_PARENT | CLK_SET_RATE_NO_REPARENT),
	KER_CLK(RCC_APB2ENR, 20, RCC_D2CCIP1R, 16, 3, "spi5", spi_src2),
	KER_CLK(RCC_APB2ENR, 13, RCC_D2CCIP1R, 16, 3, "spi4", spi_src2),
	KER_CLK(RCC_APB2ENR, 12, RCC_D2CCIP1R, 16, 3, "spi1", spi_src1),
	KER_CLK(RCC_APB2ENR, 5, RCC_D2CCIP2R, 3, 3, "usart6", usart_src1),
	KER_CLK(RCC_APB2ENR, 4, RCC_D2CCIP2R, 3, 3, "usart1", usart_src1),
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 24, 3, "sai4b", sai_src),
	KER_CLK(RCC_APB4ENR, 21, RCC_D3CCIPR, 21, 3, "sai4a", sai_src),
	KER_CLK(RCC_APB4ENR, 12, RCC_D3CCIPR, 13, 3, "lptim5", lptim_src2),
	KER_CLK(RCC_APB4ENR, 11, RCC_D3CCIPR, 13, 3, "lptim4", lptim_src2),
	KER_CLK(RCC_APB4ENR, 10, RCC_D3CCIPR, 13, 3, "lptim3", lptim_src2),
	KER_CLK(RCC_APB4ENR, 9, RCC_D3CCIPR, 10, 3, "lptim2", lptim_src2),
	KER_CLK(RCC_APB4ENR, 7, RCC_D3CCIPR, 8, 2, "i2c4", i2c_src2),
	KER_CLK(RCC_APB4ENR, 5, RCC_D3CCIPR, 28, 3, "spi6", spi_src3),
	KER_CLK(RCC_APB4ENR, 3, RCC_D3CCIPR, 0, 3, "lpuart1", lpuart1_src),
};
static struct composite_clk_gcfg kernel_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};

/* RTC clock */
/*
 * RTC & LSE registers are protected against parasitic write access.
 * The PWR_CR_DBP bit must be set to enable write access to the RTC registers.
 */

/* STM32_PWR_CR bit field */
#define PWR_CR_DBP BIT(8)

static struct composite_clk_gcfg rtc_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_GATE(NULL, 0),
};

static const struct composite_clk_cfg rtc_clk =
	KER_CLK(RCC_BDCR, 15, RCC_BDCR, 8, 2, "rtc_ck", rtc_src);
/* Micro-controller output clock */
static struct composite_clk_gcfg mco_clk_cfg = {
	M_CFG_MUX(NULL, 0),
	M_CFG_DIV(NULL, CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO),
};

#define M_MCO_F(_name, _parents, _mux_offset, _mux_shift, _mux_width,\
		_rate_offset, _rate_shift, _rate_width,\
		_flags)\
{\
	.mux = &(struct muxdiv_cfg) {_mux_offset, _mux_shift, _mux_width },\
	.div = &(struct muxdiv_cfg) {_rate_offset, _rate_shift, _rate_width},\
	.gate = NULL,\
	.name = _name,\
	.parent_name = _parents,\
	.num_parents = ARRAY_SIZE(_parents),\
	.flags = _flags,\
}

static const struct composite_clk_cfg mco_clk[] = {
	M_MCO_F("mco1", mco_src1, RCC_CFGR, 22, 4, RCC_CFGR, 18, 4, 0),
	M_MCO_F("mco2", mco_src2, RCC_CFGR, 29, 3, RCC_CFGR, 25, 4, 0),
};
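/*
 * MCO1 and MCO2 drive the two clock output pads. Both the source mux and the
 * post-divider live in RCC_CFGR; the divider is one-based (a field value of N
 * divides by N) and CLK_DIVIDER_ALLOW_ZERO lets the framework tolerate a zero
 * field value without treating it as an error.
 */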
static void __init stm32h7_rcc_init(struct device_node *np)
{
	struct clk_hw_onecell_data *clk_data;
	struct composite_cfg c_cfg;
	int n;
	const char *hse_clk, *lse_clk, *i2s_clk;
	struct regmap *pdrm;

	clk_data = kzalloc(struct_size(clk_data, hws, STM32H7_MAX_CLKS),
			GFP_KERNEL);
	if (!clk_data)
		return;

	clk_data->num = STM32H7_MAX_CLKS;

	hws = clk_data->hws;

	for (n = 0; n < STM32H7_MAX_CLKS; n++)
		hws[n] = ERR_PTR(-ENOENT);

	/* Map the RCC register base from DT */
	base = of_iomap(np, 0);
	if (!base) {
		pr_err("%pOFn: unable to map resource", np);
		kfree(clk_data);
		return;
	}

	pdrm = syscon_regmap_lookup_by_phandle(np, "st,syscfg");
	if (IS_ERR(pdrm))
		pr_warn("%s: Unable to get syscfg\n", __func__);
	else
		/*
		 * Unconditionally disable the backup domain write protection;
		 * it is never re-enabled. Needed by the LSE & RTC clocks.
		 */
		regmap_update_bits(pdrm, PWR_CR, PWR_CR_DBP, PWR_CR_DBP);

	/* Fetch the external parent names from DT */
	hse_clk = of_clk_get_parent_name(np, 0);
	lse_clk = of_clk_get_parent_name(np, 1);
	i2s_clk = of_clk_get_parent_name(np, 2);

	sai_src[3] = i2s_clk;
	spi_src1[3] = i2s_clk;

	/* Register internal oscillators */
	clk_hw_register_fixed_rate(NULL, "clk-hsi", NULL, 0, 64000000);
	clk_hw_register_fixed_rate(NULL, "clk-csi", NULL, 0, 4000000);
	clk_hw_register_fixed_rate(NULL, "clk-lsi", NULL, 0, 32000);
	clk_hw_register_fixed_rate(NULL, "clk-rc48", NULL, 0, 48000000);

	/* This clock comes from outside the SoC; its frequency is unknown */
	hws[CK_DSI_PHY] = clk_hw_register_fixed_rate(NULL, "ck_dsi_phy", NULL,
			0, 0);

	hws[HSI_DIV] = clk_hw_register_divider(NULL, "hsidiv", "clk-hsi", 0,
			base + RCC_CR, 3, 2, CLK_DIVIDER_POWER_OF_TWO,
			&stm32rcc_lock);

	hws[HSE_1M] = clk_hw_register_divider(NULL, "hse_1M", "hse_ck", 0,
			base + RCC_CFGR, 8, 6, CLK_DIVIDER_ONE_BASED |
			CLK_DIVIDER_ALLOW_ZERO,
			&stm32rcc_lock);

	/* Mux system clocks */
	for (n = 0; n < ARRAY_SIZE(stm32_mclk); n++)
		hws[MCLK_BANK + n] = clk_hw_register_mux(NULL,
				stm32_mclk[n].name,
				stm32_mclk[n].parents,
				stm32_mclk[n].num_parents,
				stm32_mclk[n].flags,
				stm32_mclk[n].offset + base,
				stm32_mclk[n].shift,
				stm32_mclk[n].width,
				0,
				&stm32rcc_lock);

	register_core_and_bus_clocks();

	/* Oscillator clocks */
	for (n = 0; n < ARRAY_SIZE(stm32_oclk); n++)
		hws[OSC_BANK + n] = clk_register_ready_gate(NULL,
				stm32_oclk[n].name,
				stm32_oclk[n].parent,
				stm32_oclk[n].gate_offset + base,
				stm32_oclk[n].bit_idx,
				stm32_oclk[n].bit_rdy,
				stm32_oclk[n].flags,
				&stm32rcc_lock);

	/* External oscillators: HSEON/HSERDY and LSEON/LSERDY ready gates */
	hws[HSE_CK] = clk_register_ready_gate(NULL,
				"hse_ck", hse_clk,
				base + RCC_CR, 16, 17,
				0, &stm32rcc_lock);

	hws[LSE_CK] = clk_register_ready_gate(NULL,
				"lse_ck", lse_clk,
				base + RCC_BDCR, 0, 1,
				0, &stm32rcc_lock);

	hws[CSI_KER_DIV122 + n] = clk_hw_register_fixed_factor(NULL,
			"csi_ker_div122", "csi_ker", 0, 1, 122);

	/* PLLs */
	for (n = 0; n < ARRAY_SIZE(stm32_pll); n++) {
		int odf;

		/* Register the VCO */
		clk_register_stm32_pll(NULL, stm32_pll[n].name,
				stm32_pll[n].parent_name, stm32_pll[n].flags,
				stm32_pll[n].cfg, &stm32rcc_lock);

		/* Register the 3 output dividers */
		for (odf = 0; odf < 3; odf++) {
			int idx = n * 3 + odf;

			get_cfg_composite_div(&odf_clk_gcfg, &stm32_odf[n][odf],
					&c_cfg, &stm32rcc_lock);

			hws[ODF_BANK + idx] = clk_hw_register_composite(NULL,
					stm32_odf[n][odf].name,
					stm32_odf[n][odf].parent_name,
					stm32_odf[n][odf].num_parents,
					c_cfg.mux_hw, c_cfg.mux_ops,
					c_cfg.div_hw, c_cfg.div_ops,
					c_cfg.gate_hw, c_cfg.gate_ops,
					stm32_odf[n][odf].flags);
		}
	}

	/* Peripheral clocks */
	for (n = 0; n < ARRAY_SIZE(pclk); n++)
		hws[PERIF_BANK + n] = clk_hw_register_gate(NULL, pclk[n].name,
				pclk[n].parent_name,
				pclk[n].flags, base + pclk[n].gate_offset,
				pclk[n].bit_idx, pclk[n].flags, &stm32rcc_lock);

	/* Kernel clocks */
	for (n = 0; n < ARRAY_SIZE(kclk); n++) {
		get_cfg_composite_div(&kernel_clk_cfg, &kclk[n], &c_cfg,
				&stm32rcc_lock);

		hws[KERN_BANK + n] = clk_hw_register_composite(NULL,
				kclk[n].name,
				kclk[n].parent_name,
				kclk[n].num_parents,
				c_cfg.mux_hw, c_cfg.mux_ops,
				c_cfg.div_hw, c_cfg.div_ops,
				c_cfg.gate_hw, c_cfg.gate_ops,
				kclk[n].flags);
	}

	/* RTC clock (default state is off) */
	clk_hw_register_fixed_rate(NULL, "off", NULL, 0, 0);

	get_cfg_composite_div(&rtc_clk_cfg, &rtc_clk, &c_cfg, &stm32rcc_lock);

	hws[RTC_CK] = clk_hw_register_composite(NULL,
			rtc_clk.name,
			rtc_clk.parent_name,
			rtc_clk.num_parents,
			c_cfg.mux_hw, c_cfg.mux_ops,
			c_cfg.div_hw, c_cfg.div_ops,
			c_cfg.gate_hw, c_cfg.gate_ops,
			rtc_clk.flags);

	/* Micro-controller output clocks */
	for (n = 0; n < ARRAY_SIZE(mco_clk); n++) {
		get_cfg_composite_div(&mco_clk_cfg, &mco_clk[n], &c_cfg,
				&stm32rcc_lock);

		hws[MCO_BANK + n] = clk_hw_register_composite(NULL,
				mco_clk[n].name,
				mco_clk[n].parent_name,
				mco_clk[n].num_parents,
				c_cfg.mux_hw, c_cfg.mux_ops,
				c_cfg.div_hw, c_cfg.div_ops,
				c_cfg.gate_hw, c_cfg.gate_ops,
				mco_clk[n].flags);
	}

	of_clk_add_hw_provider(np, of_clk_hw_onecell_get, clk_data);
}

/*
 * The RCC node is both a clock and a reset controller; the two
 * functionalities are supported by different drivers that match
 * the same compatible string.
 */
CLK_OF_DECLARE_DRIVER(stm32h7_rcc, "st,stm32h743-rcc", stm32h7_rcc_init);