// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2022 MaxLinear, Inc.
 * Copyright (C) 2020 Intel Corporation.
 * Zhu Yixin <yzhu@maxlinear.com>
 * Rahul Tanwar <rtanwar@maxlinear.com>
 */
8 #include <linux/clk-provider.h>
9 #include <linux/device.h>
/*
 * Gate registers come as a triplet relative to the gate base:
 * status at +0x0, enable-set at +0x4, enable-clear at +0x8.
 */
#define GATE_HW_REG_STAT(reg)	((reg) + 0x0)
#define GATE_HW_REG_EN(reg)	((reg) + 0x4)
#define GATE_HW_REG_DIS(reg)	((reg) + 0x8)
/* Limits of the dual-divider (DDIV) hardware fields */
#define MAX_DDIV_REG	8
#define MAX_DIVIDER_VAL	64

/* Recover the driver-private wrapper from an embedded clk_hw */
#define to_lgm_clk_mux(_hw) container_of(_hw, struct lgm_clk_mux, hw)
#define to_lgm_clk_divider(_hw) container_of(_hw, struct lgm_clk_divider, hw)
#define to_lgm_clk_gate(_hw) container_of(_hw, struct lgm_clk_gate, hw)
#define to_lgm_clk_ddiv(_hw) container_of(_hw, struct lgm_clk_ddiv, hw)
25 static struct clk_hw
*lgm_clk_register_fixed(struct lgm_clk_provider
*ctx
,
26 const struct lgm_clk_branch
*list
)
29 if (list
->div_flags
& CLOCK_FLAG_VAL_INIT
)
30 lgm_set_clk_val(ctx
->membase
, list
->div_off
, list
->div_shift
,
31 list
->div_width
, list
->div_val
);
33 return clk_hw_register_fixed_rate(NULL
, list
->name
,
34 list
->parent_data
[0].name
,
35 list
->flags
, list
->mux_flags
);
38 static u8
lgm_clk_mux_get_parent(struct clk_hw
*hw
)
40 struct lgm_clk_mux
*mux
= to_lgm_clk_mux(hw
);
43 if (mux
->flags
& MUX_CLK_SW
)
46 val
= lgm_get_clk_val(mux
->membase
, mux
->reg
, mux
->shift
,
48 return clk_mux_val_to_index(hw
, NULL
, mux
->flags
, val
);
51 static int lgm_clk_mux_set_parent(struct clk_hw
*hw
, u8 index
)
53 struct lgm_clk_mux
*mux
= to_lgm_clk_mux(hw
);
56 val
= clk_mux_index_to_val(NULL
, mux
->flags
, index
);
57 if (mux
->flags
& MUX_CLK_SW
)
60 lgm_set_clk_val(mux
->membase
, mux
->reg
, mux
->shift
,
66 static int lgm_clk_mux_determine_rate(struct clk_hw
*hw
,
67 struct clk_rate_request
*req
)
69 struct lgm_clk_mux
*mux
= to_lgm_clk_mux(hw
);
71 return clk_mux_determine_rate_flags(hw
, req
, mux
->flags
);
74 static const struct clk_ops lgm_clk_mux_ops
= {
75 .get_parent
= lgm_clk_mux_get_parent
,
76 .set_parent
= lgm_clk_mux_set_parent
,
77 .determine_rate
= lgm_clk_mux_determine_rate
,
80 static struct clk_hw
*
81 lgm_clk_register_mux(struct lgm_clk_provider
*ctx
,
82 const struct lgm_clk_branch
*list
)
84 unsigned long cflags
= list
->mux_flags
;
85 struct device
*dev
= ctx
->dev
;
86 u8 shift
= list
->mux_shift
;
87 u8 width
= list
->mux_width
;
88 struct clk_init_data init
= {};
89 struct lgm_clk_mux
*mux
;
90 u32 reg
= list
->mux_off
;
94 mux
= devm_kzalloc(dev
, sizeof(*mux
), GFP_KERNEL
);
96 return ERR_PTR(-ENOMEM
);
98 init
.name
= list
->name
;
99 init
.ops
= &lgm_clk_mux_ops
;
100 init
.flags
= list
->flags
;
101 init
.parent_data
= list
->parent_data
;
102 init
.num_parents
= list
->num_parents
;
104 mux
->membase
= ctx
->membase
;
109 mux
->hw
.init
= &init
;
112 ret
= devm_clk_hw_register(dev
, hw
);
116 if (cflags
& CLOCK_FLAG_VAL_INIT
)
117 lgm_set_clk_val(mux
->membase
, reg
, shift
, width
, list
->mux_val
);
123 lgm_clk_divider_recalc_rate(struct clk_hw
*hw
, unsigned long parent_rate
)
125 struct lgm_clk_divider
*divider
= to_lgm_clk_divider(hw
);
128 val
= lgm_get_clk_val(divider
->membase
, divider
->reg
,
129 divider
->shift
, divider
->width
);
131 return divider_recalc_rate(hw
, parent_rate
, val
, divider
->table
,
132 divider
->flags
, divider
->width
);
136 lgm_clk_divider_round_rate(struct clk_hw
*hw
, unsigned long rate
,
137 unsigned long *prate
)
139 struct lgm_clk_divider
*divider
= to_lgm_clk_divider(hw
);
141 return divider_round_rate(hw
, rate
, prate
, divider
->table
,
142 divider
->width
, divider
->flags
);
146 lgm_clk_divider_set_rate(struct clk_hw
*hw
, unsigned long rate
,
149 struct lgm_clk_divider
*divider
= to_lgm_clk_divider(hw
);
152 value
= divider_get_val(rate
, prate
, divider
->table
,
153 divider
->width
, divider
->flags
);
157 lgm_set_clk_val(divider
->membase
, divider
->reg
,
158 divider
->shift
, divider
->width
, value
);
163 static int lgm_clk_divider_enable_disable(struct clk_hw
*hw
, int enable
)
165 struct lgm_clk_divider
*div
= to_lgm_clk_divider(hw
);
167 if (div
->flags
!= DIV_CLK_NO_MASK
)
168 lgm_set_clk_val(div
->membase
, div
->reg
, div
->shift_gate
,
169 div
->width_gate
, enable
);
/* clk_ops .enable hook — ungate the divider output. */
static int lgm_clk_divider_enable(struct clk_hw *hw)
{
	return lgm_clk_divider_enable_disable(hw, 1);
}
/* clk_ops .disable hook — gate the divider output. */
static void lgm_clk_divider_disable(struct clk_hw *hw)
{
	lgm_clk_divider_enable_disable(hw, 0);
}
183 static const struct clk_ops lgm_clk_divider_ops
= {
184 .recalc_rate
= lgm_clk_divider_recalc_rate
,
185 .round_rate
= lgm_clk_divider_round_rate
,
186 .set_rate
= lgm_clk_divider_set_rate
,
187 .enable
= lgm_clk_divider_enable
,
188 .disable
= lgm_clk_divider_disable
,
191 static struct clk_hw
*
192 lgm_clk_register_divider(struct lgm_clk_provider
*ctx
,
193 const struct lgm_clk_branch
*list
)
195 unsigned long cflags
= list
->div_flags
;
196 struct device
*dev
= ctx
->dev
;
197 struct lgm_clk_divider
*div
;
198 struct clk_init_data init
= {};
199 u8 shift
= list
->div_shift
;
200 u8 width
= list
->div_width
;
201 u8 shift_gate
= list
->div_shift_gate
;
202 u8 width_gate
= list
->div_width_gate
;
203 u32 reg
= list
->div_off
;
207 div
= devm_kzalloc(dev
, sizeof(*div
), GFP_KERNEL
);
209 return ERR_PTR(-ENOMEM
);
211 init
.name
= list
->name
;
212 init
.ops
= &lgm_clk_divider_ops
;
213 init
.flags
= list
->flags
;
214 init
.parent_data
= list
->parent_data
;
215 init
.num_parents
= 1;
217 div
->membase
= ctx
->membase
;
221 div
->shift_gate
= shift_gate
;
222 div
->width_gate
= width_gate
;
224 div
->table
= list
->div_table
;
225 div
->hw
.init
= &init
;
228 ret
= devm_clk_hw_register(dev
, hw
);
232 if (cflags
& CLOCK_FLAG_VAL_INIT
)
233 lgm_set_clk_val(div
->membase
, reg
, shift
, width
, list
->div_val
);
238 static struct clk_hw
*
239 lgm_clk_register_fixed_factor(struct lgm_clk_provider
*ctx
,
240 const struct lgm_clk_branch
*list
)
244 hw
= clk_hw_register_fixed_factor(ctx
->dev
, list
->name
,
245 list
->parent_data
[0].name
, list
->flags
,
246 list
->mult
, list
->div
);
250 if (list
->div_flags
& CLOCK_FLAG_VAL_INIT
)
251 lgm_set_clk_val(ctx
->membase
, list
->div_off
, list
->div_shift
,
252 list
->div_width
, list
->div_val
);
257 static int lgm_clk_gate_enable(struct clk_hw
*hw
)
259 struct lgm_clk_gate
*gate
= to_lgm_clk_gate(hw
);
262 reg
= GATE_HW_REG_EN(gate
->reg
);
263 lgm_set_clk_val(gate
->membase
, reg
, gate
->shift
, 1, 1);
268 static void lgm_clk_gate_disable(struct clk_hw
*hw
)
270 struct lgm_clk_gate
*gate
= to_lgm_clk_gate(hw
);
273 reg
= GATE_HW_REG_DIS(gate
->reg
);
274 lgm_set_clk_val(gate
->membase
, reg
, gate
->shift
, 1, 1);
277 static int lgm_clk_gate_is_enabled(struct clk_hw
*hw
)
279 struct lgm_clk_gate
*gate
= to_lgm_clk_gate(hw
);
280 unsigned int reg
, ret
;
282 reg
= GATE_HW_REG_STAT(gate
->reg
);
283 ret
= lgm_get_clk_val(gate
->membase
, reg
, gate
->shift
, 1);
288 static const struct clk_ops lgm_clk_gate_ops
= {
289 .enable
= lgm_clk_gate_enable
,
290 .disable
= lgm_clk_gate_disable
,
291 .is_enabled
= lgm_clk_gate_is_enabled
,
294 static struct clk_hw
*
295 lgm_clk_register_gate(struct lgm_clk_provider
*ctx
,
296 const struct lgm_clk_branch
*list
)
298 unsigned long cflags
= list
->gate_flags
;
299 const char *pname
= list
->parent_data
[0].name
;
300 struct device
*dev
= ctx
->dev
;
301 u8 shift
= list
->gate_shift
;
302 struct clk_init_data init
= {};
303 struct lgm_clk_gate
*gate
;
304 u32 reg
= list
->gate_off
;
308 gate
= devm_kzalloc(dev
, sizeof(*gate
), GFP_KERNEL
);
310 return ERR_PTR(-ENOMEM
);
312 init
.name
= list
->name
;
313 init
.ops
= &lgm_clk_gate_ops
;
314 init
.flags
= list
->flags
;
315 init
.parent_names
= pname
? &pname
: NULL
;
316 init
.num_parents
= pname
? 1 : 0;
318 gate
->membase
= ctx
->membase
;
321 gate
->flags
= cflags
;
322 gate
->hw
.init
= &init
;
325 ret
= devm_clk_hw_register(dev
, hw
);
329 if (cflags
& CLOCK_FLAG_VAL_INIT
) {
330 lgm_set_clk_val(gate
->membase
, reg
, shift
, 1, list
->gate_val
);
336 int lgm_clk_register_branches(struct lgm_clk_provider
*ctx
,
337 const struct lgm_clk_branch
*list
,
343 for (idx
= 0; idx
< nr_clk
; idx
++, list
++) {
344 switch (list
->type
) {
346 hw
= lgm_clk_register_fixed(ctx
, list
);
349 hw
= lgm_clk_register_mux(ctx
, list
);
351 case CLK_TYPE_DIVIDER
:
352 hw
= lgm_clk_register_divider(ctx
, list
);
354 case CLK_TYPE_FIXED_FACTOR
:
355 hw
= lgm_clk_register_fixed_factor(ctx
, list
);
358 if (list
->gate_flags
& GATE_CLK_HW
) {
359 hw
= lgm_clk_register_gate(ctx
, list
);
362 * GATE_CLKs can be controlled either from
363 * CGU clk driver i.e. this driver or directly
364 * from power management driver/daemon. It is
365 * dependent on the power policy/profile requirements
366 * of the end product. To override control of gate
367 * clks from this driver, provide NULL for this index
368 * of gate clk provider.
375 dev_err(ctx
->dev
, "invalid clk type\n");
381 "register clk: %s, type: %u failed!\n",
382 list
->name
, list
->type
);
385 ctx
->clk_data
.hws
[list
->id
] = hw
;
392 lgm_clk_ddiv_recalc_rate(struct clk_hw
*hw
, unsigned long parent_rate
)
394 struct lgm_clk_ddiv
*ddiv
= to_lgm_clk_ddiv(hw
);
395 unsigned int div0
, div1
, exdiv
;
398 div0
= lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
,
399 ddiv
->shift0
, ddiv
->width0
) + 1;
400 div1
= lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
,
401 ddiv
->shift1
, ddiv
->width1
) + 1;
402 exdiv
= lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
,
403 ddiv
->shift2
, ddiv
->width2
);
404 prate
= (u64
)parent_rate
;
409 do_div(prate
, ddiv
->div
);
416 static int lgm_clk_ddiv_enable(struct clk_hw
*hw
)
418 struct lgm_clk_ddiv
*ddiv
= to_lgm_clk_ddiv(hw
);
420 lgm_set_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift_gate
,
421 ddiv
->width_gate
, 1);
425 static void lgm_clk_ddiv_disable(struct clk_hw
*hw
)
427 struct lgm_clk_ddiv
*ddiv
= to_lgm_clk_ddiv(hw
);
429 lgm_set_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift_gate
,
430 ddiv
->width_gate
, 0);
434 lgm_clk_get_ddiv_val(u32 div
, u32
*ddiv1
, u32
*ddiv2
)
441 if (div
> MAX_DIVIDER_VAL
)
442 div
= MAX_DIVIDER_VAL
;
445 for (idx
= 2; idx
<= MAX_DDIV_REG
; idx
++) {
446 temp
= DIV_ROUND_UP_ULL((u64
)div
, idx
);
447 if (div
% idx
== 0 && temp
<= MAX_DDIV_REG
)
451 if (idx
> MAX_DDIV_REG
)
462 lgm_clk_ddiv_set_rate(struct clk_hw
*hw
, unsigned long rate
,
465 struct lgm_clk_ddiv
*ddiv
= to_lgm_clk_ddiv(hw
);
466 u32 div
, ddiv1
, ddiv2
;
468 div
= DIV_ROUND_CLOSEST_ULL((u64
)prate
, rate
);
470 if (lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift2
, 1)) {
471 div
= DIV_ROUND_CLOSEST_ULL((u64
)div
, 5);
478 if (lgm_clk_get_ddiv_val(div
, &ddiv1
, &ddiv2
))
481 lgm_set_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift0
, ddiv
->width0
,
484 lgm_set_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift1
, ddiv
->width1
,
491 lgm_clk_ddiv_round_rate(struct clk_hw
*hw
, unsigned long rate
,
492 unsigned long *prate
)
494 struct lgm_clk_ddiv
*ddiv
= to_lgm_clk_ddiv(hw
);
495 u32 div
, ddiv1
, ddiv2
;
498 div
= DIV_ROUND_CLOSEST_ULL((u64
)*prate
, rate
);
500 /* if predivide bit is enabled, modify div by factor of 2.5 */
501 if (lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift2
, 1)) {
503 div
= DIV_ROUND_CLOSEST_ULL((u64
)div
, 5);
509 if (lgm_clk_get_ddiv_val(div
, &ddiv1
, &ddiv2
) != 0)
510 if (lgm_clk_get_ddiv_val(div
+ 1, &ddiv1
, &ddiv2
) != 0)
514 do_div(rate64
, ddiv1
);
515 do_div(rate64
, ddiv2
);
517 /* if predivide bit is enabled, modify rounded rate by factor of 2.5 */
518 if (lgm_get_clk_val(ddiv
->membase
, ddiv
->reg
, ddiv
->shift2
, 1)) {
520 rate64
= DIV_ROUND_CLOSEST_ULL(rate64
, 5);
526 static const struct clk_ops lgm_clk_ddiv_ops
= {
527 .recalc_rate
= lgm_clk_ddiv_recalc_rate
,
528 .enable
= lgm_clk_ddiv_enable
,
529 .disable
= lgm_clk_ddiv_disable
,
530 .set_rate
= lgm_clk_ddiv_set_rate
,
531 .round_rate
= lgm_clk_ddiv_round_rate
,
534 int lgm_clk_register_ddiv(struct lgm_clk_provider
*ctx
,
535 const struct lgm_clk_ddiv_data
*list
,
538 struct device
*dev
= ctx
->dev
;
543 for (idx
= 0; idx
< nr_clk
; idx
++, list
++) {
544 struct clk_init_data init
= {};
545 struct lgm_clk_ddiv
*ddiv
;
547 ddiv
= devm_kzalloc(dev
, sizeof(*ddiv
), GFP_KERNEL
);
551 init
.name
= list
->name
;
552 init
.ops
= &lgm_clk_ddiv_ops
;
553 init
.flags
= list
->flags
;
554 init
.parent_data
= list
->parent_data
;
555 init
.num_parents
= 1;
557 ddiv
->membase
= ctx
->membase
;
558 ddiv
->reg
= list
->reg
;
559 ddiv
->shift0
= list
->shift0
;
560 ddiv
->width0
= list
->width0
;
561 ddiv
->shift1
= list
->shift1
;
562 ddiv
->width1
= list
->width1
;
563 ddiv
->shift_gate
= list
->shift_gate
;
564 ddiv
->width_gate
= list
->width_gate
;
565 ddiv
->shift2
= list
->ex_shift
;
566 ddiv
->width2
= list
->ex_width
;
567 ddiv
->flags
= list
->div_flags
;
570 ddiv
->hw
.init
= &init
;
573 ret
= devm_clk_hw_register(dev
, hw
);
575 dev_err(dev
, "register clk: %s failed!\n", list
->name
);
578 ctx
->clk_data
.hws
[list
->id
] = hw
;