/*
 * Marvell Armada 37xx SoC Peripheral clocks
 *
 * Copyright (C) 2016 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2 or later. This program is licensed "as is"
 * without any warranty of any kind, whether express or implied.
 *
 * Most of the peripheral clocks can be modelled like this:
 *             _____    _______    _______
 * TBG-A-P  --|     |  |       |  |       |   ______
 * TBG-B-P  --| Mux |--| /div1 |--| /div2 |--| Gate |--> perip_clk
 * TBG-A-S  --|     |  |       |  |       |  |______|
 * TBG-B-S  --|_____|  |_______|  |_______|
 *
 * However, some clocks may use only one or two of these blocks, or use
 * the xtal clock as parent.
 */
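
/*
 * Illustrative numbers for the model above (made-up example, not taken
 * from a datasheet): with the selected TBG parent running at 800 MHz,
 * /div1 = 2 and /div2 = 2, the peripheral clock is
 * 800 MHz / (2 * 2) = 200 MHz once its gate is enabled.
 */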

#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#define LOAD_LEVEL_NR	4

#define ARMADA_37XX_NB_L0L1	0x18
#define ARMADA_37XX_NB_L2L3	0x1C
#define ARMADA_37XX_NB_TBG_DIV_OFF	13
#define ARMADA_37XX_NB_TBG_DIV_MASK	0x7
#define ARMADA_37XX_NB_CLK_SEL_OFF	11
#define ARMADA_37XX_NB_CLK_SEL_MASK	0x1
#define ARMADA_37XX_NB_TBG_SEL_OFF	9
#define ARMADA_37XX_NB_TBG_SEL_MASK	0x3
#define ARMADA_37XX_NB_CONFIG_SHIFT	16
#define ARMADA_37XX_NB_DYN_MOD	0x24
#define ARMADA_37XX_NB_DFS_EN	31
#define ARMADA_37XX_NB_CPU_LOAD	0x30
#define ARMADA_37XX_NB_CPU_LOAD_MASK	0x3
#define ARMADA_37XX_DVFS_LOAD_0	0
#define ARMADA_37XX_DVFS_LOAD_1	1
#define ARMADA_37XX_DVFS_LOAD_2	2
#define ARMADA_37XX_DVFS_LOAD_3	3
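
/*
 * Note on the DVFS registers above (layout as used by
 * armada_3700_pm_dvfs_update_regs() below): the per-load-level CPU
 * settings are packed two load levels per register, L0/L1 in
 * ARMADA_37XX_NB_L0L1 and L2/L3 in ARMADA_37XX_NB_L2L3, while
 * ARMADA_37XX_NB_CPU_LOAD selects which load level is currently active.
 */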

struct clk_periph_driver_data {
	struct clk_hw_onecell_data *hw_data;
	spinlock_t lock;
};

struct clk_double_div {
	struct clk_hw hw;
	void __iomem *reg1;
	u8 shift1;
	void __iomem *reg2;
	u8 shift2;
};

struct clk_pm_cpu {
	struct clk_hw hw;
	void __iomem *reg_mux;
	u8 shift_mux;
	u32 mask_mux;
	void __iomem *reg_div;
	u8 shift_div;
	struct regmap *nb_pm_base;
};

#define to_clk_double_div(_hw) container_of(_hw, struct clk_double_div, hw)
#define to_clk_pm_cpu(_hw) container_of(_hw, struct clk_pm_cpu, hw)

struct clk_periph_data {
	const char *name;
	const char * const *parent_names;
	int num_parents;
	struct clk_hw *mux_hw;
	struct clk_hw *rate_hw;
	struct clk_hw *gate_hw;
	struct clk_hw *muxrate_hw;
	bool is_double_div;
};

static const struct clk_div_table clk_table6[] = {
	{ .val = 1, .div = 1, },
	{ .val = 2, .div = 2, },
	{ .val = 3, .div = 3, },
	{ .val = 4, .div = 4, },
	{ .val = 5, .div = 5, },
	{ .val = 6, .div = 6, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table1[] = {
	{ .val = 0, .div = 1, },
	{ .val = 1, .div = 2, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_div_table clk_table2[] = {
	{ .val = 0, .div = 2, },
	{ .val = 1, .div = 4, },
	{ .val = 0, .div = 0, }, /* last entry */
};

static const struct clk_ops clk_double_div_ops;
static const struct clk_ops clk_pm_cpu_ops;

#define PERIPH_GATE(_name, _bit)			\
struct clk_gate gate_##_name = {			\
	.reg = (void *)CLK_DIS,				\
	.bit_idx = _bit,				\
	.hw.init = &(struct clk_init_data){		\
		.ops = &clk_gate_ops,			\
	}						\
};

#define PERIPH_MUX(_name, _shift)			\
struct clk_mux mux_##_name = {				\
	.reg = (void *)TBG_SEL,				\
	.shift = _shift,				\
	.mask = 3,					\
	.hw.init = &(struct clk_init_data){		\
		.ops = &clk_mux_ro_ops,			\
	}						\
};

#define PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2)	\
struct clk_double_div rate_##_name = {			\
	.reg1 = (void *)_reg1,				\
	.reg2 = (void *)_reg2,				\
	.shift1 = _shift1,				\
	.shift2 = _shift2,				\
	.hw.init = &(struct clk_init_data){		\
		.ops = &clk_double_div_ops,		\
	}						\
};

#define PERIPH_DIV(_name, _reg, _shift, _table)		\
struct clk_divider rate_##_name = {			\
	.reg = (void *)_reg,				\
	.table = _table,				\
	.shift = _shift,				\
	.hw.init = &(struct clk_init_data){		\
		.ops = &clk_divider_ro_ops,		\
	}						\
};

#define PERIPH_PM_CPU(_name, _shift1, _reg, _shift2)	\
struct clk_pm_cpu muxrate_##_name = {			\
	.reg_mux = (void *)TBG_SEL,			\
	.mask_mux = 3,					\
	.shift_mux = _shift1,				\
	.reg_div = (void *)_reg,			\
	.shift_div = _shift2,				\
	.hw.init = &(struct clk_init_data){		\
		.ops = &clk_pm_cpu_ops,			\
	}						\
};

#define PERIPH_CLK_FULL_DD(_name, _bit, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define PERIPH_CLK_FULL(_name, _bit, _shift, _reg, _shift1, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DIV(_name, _reg, _shift1, _table);

#define PERIPH_CLK_GATE_DIV(_name, _bit, _reg, _shift, _table)	\
static PERIPH_GATE(_name, _bit);			\
static PERIPH_DIV(_name, _reg, _shift, _table);

#define PERIPH_CLK_MUX_DD(_name, _shift, _reg1, _reg2, _shift1, _shift2)\
static PERIPH_MUX(_name, _shift);			\
static PERIPH_DOUBLEDIV(_name, _reg1, _reg2, _shift1, _shift2);

#define REF_CLK_FULL(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	}

#define REF_CLK_FULL_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .gate_hw = &gate_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}

#define REF_CLK_GATE(_name, _parent_name)			\
	{ .name = #_name,					\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,					\
	  .gate_hw = &gate_##_name.hw,				\
	}

#define REF_CLK_GATE_DIV(_name, _parent_name)			\
	{ .name = #_name,					\
	  .parent_names = (const char *[]){ _parent_name},	\
	  .num_parents = 1,					\
	  .gate_hw = &gate_##_name.hw,				\
	  .rate_hw = &rate_##_name.hw,				\
	}

#define REF_CLK_PM_CPU(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .muxrate_hw = &muxrate_##_name.hw,		\
	}

#define REF_CLK_MUX_DD(_name)				\
	{ .name = #_name,				\
	  .parent_names = (const char *[]){ "TBG-A-P",	\
	      "TBG-B-P", "TBG-A-S", "TBG-B-S"},		\
	  .num_parents = 4,				\
	  .mux_hw = &mux_##_name.hw,			\
	  .rate_hw = &rate_##_name.hw,			\
	  .is_double_div = true,			\
	}
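
/*
 * Putting the two macro families together, e.g. for the mmc clock below:
 * PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13) declares
 * gate_mmc (bit 2 of CLK_DIS), mux_mmc (TBG_SEL, shift 0) and rate_mmc
 * (two 3-bit divider fields in DIV_SEL2 at shifts 16 and 13), and
 * REF_CLK_FULL_DD(mmc) then ties them into one clk_periph_data entry
 * with the four TBG clocks as possible parents.
 */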

/* NB periph clocks */
PERIPH_CLK_FULL_DD(mmc, 2, 0, DIV_SEL2, DIV_SEL2, 16, 13);
PERIPH_CLK_FULL_DD(sata_host, 3, 2, DIV_SEL2, DIV_SEL2, 10, 7);
PERIPH_CLK_FULL_DD(sec_at, 6, 4, DIV_SEL1, DIV_SEL1, 3, 0);
PERIPH_CLK_FULL_DD(sec_dap, 7, 6, DIV_SEL1, DIV_SEL1, 9, 6);
PERIPH_CLK_FULL_DD(tscem, 8, 8, DIV_SEL1, DIV_SEL1, 15, 12);
PERIPH_CLK_FULL(tscem_tmx, 10, 10, DIV_SEL1, 18, clk_table6);
static PERIPH_GATE(avs, 11);
PERIPH_CLK_FULL_DD(pwm, 13, 14, DIV_SEL0, DIV_SEL0, 3, 0);
PERIPH_CLK_FULL_DD(sqf, 12, 12, DIV_SEL1, DIV_SEL1, 27, 24);
static PERIPH_GATE(i2c_2, 16);
static PERIPH_GATE(i2c_1, 17);
PERIPH_CLK_GATE_DIV(ddr_phy, 19, DIV_SEL0, 18, clk_table2);
PERIPH_CLK_FULL_DD(ddr_fclk, 21, 16, DIV_SEL0, DIV_SEL0, 15, 12);
PERIPH_CLK_FULL(trace, 22, 18, DIV_SEL0, 20, clk_table6);
PERIPH_CLK_FULL(counter, 23, 20, DIV_SEL0, 23, clk_table6);
PERIPH_CLK_FULL_DD(eip97, 24, 24, DIV_SEL2, DIV_SEL2, 22, 19);
static PERIPH_PM_CPU(cpu, 22, DIV_SEL0, 28);

static struct clk_periph_data data_nb[] = {
	REF_CLK_FULL_DD(mmc),
	REF_CLK_FULL_DD(sata_host),
	REF_CLK_FULL_DD(sec_at),
	REF_CLK_FULL_DD(sec_dap),
	REF_CLK_FULL_DD(tscem),
	REF_CLK_FULL(tscem_tmx),
	REF_CLK_GATE(avs, "xtal"),
	REF_CLK_FULL_DD(sqf),
	REF_CLK_FULL_DD(pwm),
	REF_CLK_GATE(i2c_2, "xtal"),
	REF_CLK_GATE(i2c_1, "xtal"),
	REF_CLK_GATE_DIV(ddr_phy, "TBG-A-S"),
	REF_CLK_FULL_DD(ddr_fclk),
	REF_CLK_FULL(trace),
	REF_CLK_FULL(counter),
	REF_CLK_FULL_DD(eip97),
	REF_CLK_PM_CPU(cpu),
	{ },
};

/* SB periph clocks */
PERIPH_CLK_MUX_DD(gbe_50, 6, DIV_SEL2, DIV_SEL2, 6, 9);
PERIPH_CLK_MUX_DD(gbe_core, 8, DIV_SEL1, DIV_SEL1, 18, 21);
PERIPH_CLK_MUX_DD(gbe_125, 10, DIV_SEL1, DIV_SEL1, 6, 9);
static PERIPH_GATE(gbe1_50, 0);
static PERIPH_GATE(gbe0_50, 1);
static PERIPH_GATE(gbe1_125, 2);
static PERIPH_GATE(gbe0_125, 3);
PERIPH_CLK_GATE_DIV(gbe1_core, 4, DIV_SEL1, 13, clk_table1);
PERIPH_CLK_GATE_DIV(gbe0_core, 5, DIV_SEL1, 14, clk_table1);
PERIPH_CLK_GATE_DIV(gbe_bm, 12, DIV_SEL1, 0, clk_table1);
PERIPH_CLK_FULL_DD(sdio, 11, 14, DIV_SEL0, DIV_SEL0, 3, 6);
PERIPH_CLK_FULL_DD(usb32_usb2_sys, 16, 16, DIV_SEL0, DIV_SEL0, 9, 12);
PERIPH_CLK_FULL_DD(usb32_ss_sys, 17, 18, DIV_SEL0, DIV_SEL0, 15, 18);

static struct clk_periph_data data_sb[] = {
	REF_CLK_MUX_DD(gbe_50),
	REF_CLK_MUX_DD(gbe_core),
	REF_CLK_MUX_DD(gbe_125),
	REF_CLK_GATE(gbe1_50, "gbe_50"),
	REF_CLK_GATE(gbe0_50, "gbe_50"),
	REF_CLK_GATE(gbe1_125, "gbe_125"),
	REF_CLK_GATE(gbe0_125, "gbe_125"),
	REF_CLK_GATE_DIV(gbe1_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe0_core, "gbe_core"),
	REF_CLK_GATE_DIV(gbe_bm, "gbe_core"),
	REF_CLK_FULL_DD(sdio),
	REF_CLK_FULL_DD(usb32_usb2_sys),
	REF_CLK_FULL_DD(usb32_ss_sys),
	{ },
};

static unsigned int get_div(void __iomem *reg, int shift)
{
	u32 val;

	val = (readl(reg) >> shift) & 0x7;
	return val;
}

static unsigned long clk_double_div_recalc_rate(struct clk_hw *hw,
						unsigned long parent_rate)
{
	struct clk_double_div *double_div = to_clk_double_div(hw);
	unsigned int div;

	div = get_div(double_div->reg1, double_div->shift1);
	div *= get_div(double_div->reg2, double_div->shift2);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static const struct clk_ops clk_double_div_ops = {
	.recalc_rate = clk_double_div_recalc_rate,
};

static void armada_3700_pm_dvfs_update_regs(unsigned int load_level,
					    unsigned int *reg,
					    unsigned int *offset)
{
	if (load_level <= ARMADA_37XX_DVFS_LOAD_1)
		*reg = ARMADA_37XX_NB_L0L1;
	else
		*reg = ARMADA_37XX_NB_L2L3;

	if (load_level == ARMADA_37XX_DVFS_LOAD_0 ||
	    load_level == ARMADA_37XX_DVFS_LOAD_2)
		*offset += ARMADA_37XX_NB_CONFIG_SHIFT;
}
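
/*
 * Example of the mapping performed above: load level 1 keeps *offset
 * untouched and selects ARMADA_37XX_NB_L0L1, so its TBG divider sits at
 * ARMADA_37XX_NB_TBG_DIV_OFF in that register; load level 2 selects
 * ARMADA_37XX_NB_L2L3 and adds ARMADA_37XX_NB_CONFIG_SHIFT, so the same
 * field lives 16 bits higher in the other register.
 */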

static bool armada_3700_pm_dvfs_is_enabled(struct regmap *base)
{
	unsigned int val, reg = ARMADA_37XX_NB_DYN_MOD;

	if (IS_ERR(base))
		return false;

	regmap_read(base, reg, &val);

	return !!(val & BIT(ARMADA_37XX_NB_DFS_EN));
}

static unsigned int armada_3700_pm_dvfs_get_cpu_div(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_DIV_OFF;
	unsigned int load_level, div;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again whether the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset inside that register, used to
	 * read the current divider depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &div);

	return (div >> offset) & ARMADA_37XX_NB_TBG_DIV_MASK;
}

static unsigned int armada_3700_pm_dvfs_get_cpu_parent(struct regmap *base)
{
	unsigned int reg = ARMADA_37XX_NB_CPU_LOAD;
	unsigned int offset = ARMADA_37XX_NB_TBG_SEL_OFF;
	unsigned int load_level, sel;

	/*
	 * This function is always called after
	 * armada_3700_pm_dvfs_is_enabled, so there is no need to check
	 * again whether the base is valid.
	 */
	regmap_read(base, reg, &load_level);

	/*
	 * The register, and the offset inside that register, used to
	 * read the current parent selection depend on the load level.
	 */
	load_level &= ARMADA_37XX_NB_CPU_LOAD_MASK;
	armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

	regmap_read(base, reg, &sel);

	return (sel >> offset) & ARMADA_37XX_NB_TBG_SEL_MASK;
}

static u8 clk_pm_cpu_get_parent(struct clk_hw *hw)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	int num_parents = clk_hw_get_num_parents(hw);
	u32 val;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base)) {
		val = armada_3700_pm_dvfs_get_cpu_parent(pm_cpu->nb_pm_base);
	} else {
		val = readl(pm_cpu->reg_mux) >> pm_cpu->shift_mux;
		val &= pm_cpu->mask_mux;
	}

	if (val >= num_parents)
		return 0;

	return val;
}

static int clk_pm_cpu_set_parent(struct clk_hw *hw, u8 index)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	int load_level;

	/*
	 * We set the clock parent only if the DVFS is available but
	 * not enabled.
	 */
	if (IS_ERR(base) || armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	/* Set the parent clock for all the load levels */
	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_SEL_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		val = index << offset;
		mask = ARMADA_37XX_NB_TBG_SEL_MASK << offset;
		regmap_update_bits(base, reg, mask, val);
	}

	return 0;
}

static unsigned long clk_pm_cpu_recalc_rate(struct clk_hw *hw,
					    unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	unsigned int div;

	if (armada_3700_pm_dvfs_is_enabled(pm_cpu->nb_pm_base))
		div = armada_3700_pm_dvfs_get_cpu_div(pm_cpu->nb_pm_base);
	else
		div = get_div(pm_cpu->reg_div, pm_cpu->shift_div);

	return DIV_ROUND_UP_ULL((u64)parent_rate, div);
}

static long clk_pm_cpu_round_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long *parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = *parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, val, offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);

		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;
		if (val == div)
			/*
			 * We found a load level matching the target
			 * divider: this rate can be achieved by
			 * switching to that load level.
			 */
			return *parent_rate / div;
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

static int clk_pm_cpu_set_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long parent_rate)
{
	struct clk_pm_cpu *pm_cpu = to_clk_pm_cpu(hw);
	struct regmap *base = pm_cpu->nb_pm_base;
	unsigned int div = parent_rate / rate;
	unsigned int load_level;

	/* only available when DVFS is enabled */
	if (!armada_3700_pm_dvfs_is_enabled(base))
		return -EINVAL;

	for (load_level = 0; load_level < LOAD_LEVEL_NR; load_level++) {
		unsigned int reg, mask, val,
			offset = ARMADA_37XX_NB_TBG_DIV_OFF;

		armada_3700_pm_dvfs_update_regs(load_level, &reg, &offset);

		regmap_read(base, reg, &val);
		val >>= offset;
		val &= ARMADA_37XX_NB_TBG_DIV_MASK;

		if (val == div) {
			/*
			 * We found a load level matching the target
			 * divider: switch to this load level and
			 * return.
			 */
			reg = ARMADA_37XX_NB_CPU_LOAD;
			mask = ARMADA_37XX_NB_CPU_LOAD_MASK;
			regmap_update_bits(base, reg, mask, load_level);

			return 0;
		}
	}

	/* We didn't find any valid divider */
	return -EINVAL;
}

static const struct clk_ops clk_pm_cpu_ops = {
	.get_parent = clk_pm_cpu_get_parent,
	.set_parent = clk_pm_cpu_set_parent,
	.round_rate = clk_pm_cpu_round_rate,
	.set_rate = clk_pm_cpu_set_rate,
	.recalc_rate = clk_pm_cpu_recalc_rate,
};
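
/*
 * Summary of the CPU clock ops above: when DVFS is enabled, the parent
 * and divider are taken from the North Bridge PM regmap for the current
 * load level, and rate changes are performed by switching to a load
 * level whose divider matches; when DVFS is disabled, the parent and
 * divider are read directly from the clock registers and only
 * set_parent is allowed (round_rate/set_rate return -EINVAL).
 */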

static const struct of_device_id armada_3700_periph_clock_of_match[] = {
	{ .compatible = "marvell,armada-3700-periph-clock-nb",
	  .data = data_nb, },
	{ .compatible = "marvell,armada-3700-periph-clock-sb",
	  .data = data_sb, },
	{ }
};

static int armada_3700_add_composite_clk(const struct clk_periph_data *data,
					 void __iomem *reg, spinlock_t *lock,
					 struct device *dev, struct clk_hw **hw)
{
	const struct clk_ops *mux_ops = NULL, *gate_ops = NULL,
		*rate_ops = NULL;
	struct clk_hw *mux_hw = NULL, *gate_hw = NULL, *rate_hw = NULL;

	if (data->mux_hw) {
		struct clk_mux *mux;

		mux_hw = data->mux_hw;
		mux = to_clk_mux(mux_hw);
		mux->lock = lock;
		mux_ops = mux_hw->init->ops;
		mux->reg = reg + (u64)mux->reg;
	}

	if (data->gate_hw) {
		struct clk_gate *gate;

		gate_hw = data->gate_hw;
		gate = to_clk_gate(gate_hw);
		gate->lock = lock;
		gate_ops = gate_hw->init->ops;
		gate->reg = reg + (u64)gate->reg;
		gate->flags = CLK_GATE_SET_TO_DISABLE;
	}

	if (data->rate_hw) {
		rate_hw = data->rate_hw;
		rate_ops = rate_hw->init->ops;
		if (data->is_double_div) {
			struct clk_double_div *rate;

			rate = to_clk_double_div(rate_hw);
			rate->reg1 = reg + (u64)rate->reg1;
			rate->reg2 = reg + (u64)rate->reg2;
		} else {
			struct clk_divider *rate = to_clk_divider(rate_hw);
			const struct clk_div_table *clkt;
			int table_size = 0;

			rate->reg = reg + (u64)rate->reg;
			for (clkt = rate->table; clkt->div; clkt++)
				table_size++;
			rate->width = order_base_2(table_size);
		}
	}

	if (data->muxrate_hw) {
		struct clk_pm_cpu *pmcpu_clk;
		struct clk_hw *muxrate_hw = data->muxrate_hw;
		struct regmap *map;

		pmcpu_clk = to_clk_pm_cpu(muxrate_hw);
		pmcpu_clk->reg_mux = reg + (u64)pmcpu_clk->reg_mux;
		pmcpu_clk->reg_div = reg + (u64)pmcpu_clk->reg_div;

		mux_hw = muxrate_hw;
		rate_hw = muxrate_hw;
		mux_ops = muxrate_hw->init->ops;
		rate_ops = muxrate_hw->init->ops;

		map = syscon_regmap_lookup_by_compatible(
				"marvell,armada-3700-nb-pm");
		pmcpu_clk->nb_pm_base = map;
	}

	*hw = clk_hw_register_composite(dev, data->name, data->parent_names,
					data->num_parents, mux_hw,
					mux_ops, rate_hw, rate_ops,
					gate_hw, gate_ops, CLK_IGNORE_UNUSED);

	return PTR_ERR_OR_ZERO(*hw);
}

static int armada_3700_periph_clock_probe(struct platform_device *pdev)
{
	struct clk_periph_driver_data *driver_data;
	struct device_node *np = pdev->dev.of_node;
	const struct clk_periph_data *data;
	struct device *dev = &pdev->dev;
	int num_periph = 0, i, ret;
	struct resource *res;
	void __iomem *reg;

	data = of_device_get_match_data(dev);
	if (!data)
		return -ENODEV;

	while (data[num_periph].name)
		num_periph++;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	reg = devm_ioremap_resource(dev, res);
	if (IS_ERR(reg))
		return PTR_ERR(reg);

	driver_data = devm_kzalloc(dev, sizeof(*driver_data), GFP_KERNEL);
	if (!driver_data)
		return -ENOMEM;

	driver_data->hw_data = devm_kzalloc(dev,
				sizeof(*driver_data->hw_data) +
				sizeof(*driver_data->hw_data->hws) * num_periph,
				GFP_KERNEL);
	if (!driver_data->hw_data)
		return -ENOMEM;
	driver_data->hw_data->num = num_periph;

	spin_lock_init(&driver_data->lock);

	for (i = 0; i < num_periph; i++) {
		struct clk_hw **hw = &driver_data->hw_data->hws[i];

		if (armada_3700_add_composite_clk(&data[i], reg,
						  &driver_data->lock, dev, hw))
			dev_err(dev, "Can't register periph clock %s\n",
				data[i].name);
	}

	ret = of_clk_add_hw_provider(np, of_clk_hw_onecell_get,
				     driver_data->hw_data);
	if (ret) {
		for (i = 0; i < num_periph; i++)
			clk_hw_unregister(driver_data->hw_data->hws[i]);
		return ret;
	}

	platform_set_drvdata(pdev, driver_data);

	return 0;
}

static int armada_3700_periph_clock_remove(struct platform_device *pdev)
{
	struct clk_periph_driver_data *data = platform_get_drvdata(pdev);
	struct clk_hw_onecell_data *hw_data = data->hw_data;
	int i;

	of_clk_del_provider(pdev->dev.of_node);

	for (i = 0; i < hw_data->num; i++)
		clk_hw_unregister(hw_data->hws[i]);

	return 0;
}

static struct platform_driver armada_3700_periph_clock_driver = {
	.probe = armada_3700_periph_clock_probe,
	.remove = armada_3700_periph_clock_remove,
	.driver = {
		.name = "marvell-armada-3700-periph-clock",
		.of_match_table = armada_3700_periph_clock_of_match,
	},
};

builtin_platform_driver(armada_3700_periph_clock_driver);