// SPDX-License-Identifier: GPL-2.0-only
/*
 * mmp mix(div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 */
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"
/*
 * The mix clock is a clock combined mux and div type clock.
 * Because the div field and mux field need to be set at same
 * time, we can not divide it into 2 types of clock
 */
/* Convert a clk_hw pointer back to its containing mmp_clk_mix. */
#define to_clk_mix(hw)	container_of(hw, struct mmp_clk_mix, hw)
24 static unsigned int _get_maxdiv(struct mmp_clk_mix
*mix
)
26 unsigned int div_mask
= (1 << mix
->reg_info
.width_div
) - 1;
27 unsigned int maxdiv
= 0;
28 struct clk_div_table
*clkt
;
30 if (mix
->div_flags
& CLK_DIVIDER_ONE_BASED
)
32 if (mix
->div_flags
& CLK_DIVIDER_POWER_OF_TWO
)
35 for (clkt
= mix
->div_table
; clkt
->div
; clkt
++)
36 if (clkt
->div
> maxdiv
)
43 static unsigned int _get_div(struct mmp_clk_mix
*mix
, unsigned int val
)
45 struct clk_div_table
*clkt
;
47 if (mix
->div_flags
& CLK_DIVIDER_ONE_BASED
)
49 if (mix
->div_flags
& CLK_DIVIDER_POWER_OF_TWO
)
52 for (clkt
= mix
->div_table
; clkt
->div
; clkt
++)
61 static unsigned int _get_mux(struct mmp_clk_mix
*mix
, unsigned int val
)
63 int num_parents
= clk_hw_get_num_parents(&mix
->hw
);
66 if (mix
->mux_flags
& CLK_MUX_INDEX_BIT
)
68 if (mix
->mux_flags
& CLK_MUX_INDEX_ONE
)
71 for (i
= 0; i
< num_parents
; i
++)
72 if (mix
->mux_table
[i
] == val
)
80 static unsigned int _get_div_val(struct mmp_clk_mix
*mix
, unsigned int div
)
82 struct clk_div_table
*clkt
;
84 if (mix
->div_flags
& CLK_DIVIDER_ONE_BASED
)
86 if (mix
->div_flags
& CLK_DIVIDER_POWER_OF_TWO
)
89 for (clkt
= mix
->div_table
; clkt
->div
; clkt
++)
99 static unsigned int _get_mux_val(struct mmp_clk_mix
*mix
, unsigned int mux
)
102 return mix
->mux_table
[mux
];
107 static void _filter_clk_table(struct mmp_clk_mix
*mix
,
108 struct mmp_clk_mix_clk_table
*table
,
109 unsigned int table_size
)
112 struct mmp_clk_mix_clk_table
*item
;
113 struct clk_hw
*parent
, *hw
;
114 unsigned long parent_rate
;
118 for (i
= 0; i
< table_size
; i
++) {
120 parent
= clk_hw_get_parent_by_index(hw
, item
->parent_index
);
121 parent_rate
= clk_hw_get_rate(parent
);
122 if (parent_rate
% item
->rate
) {
125 item
->divisor
= parent_rate
/ item
->rate
;
131 static int _set_rate(struct mmp_clk_mix
*mix
, u32 mux_val
, u32 div_val
,
132 unsigned int change_mux
, unsigned int change_div
)
134 struct mmp_clk_mix_reg_info
*ri
= &mix
->reg_info
;
137 int ret
, timeout
= 50;
138 unsigned long flags
= 0;
140 if (!change_mux
&& !change_div
)
144 spin_lock_irqsave(mix
->lock
, flags
);
146 if (mix
->type
== MMP_CLK_MIX_TYPE_V1
147 || mix
->type
== MMP_CLK_MIX_TYPE_V2
)
148 mux_div
= readl(ri
->reg_clk_ctrl
);
150 mux_div
= readl(ri
->reg_clk_sel
);
153 width
= ri
->width_div
;
154 shift
= ri
->shift_div
;
155 mux_div
&= ~MMP_CLK_BITS_MASK(width
, shift
);
156 mux_div
|= MMP_CLK_BITS_SET_VAL(div_val
, width
, shift
);
160 width
= ri
->width_mux
;
161 shift
= ri
->shift_mux
;
162 mux_div
&= ~MMP_CLK_BITS_MASK(width
, shift
);
163 mux_div
|= MMP_CLK_BITS_SET_VAL(mux_val
, width
, shift
);
166 if (mix
->type
== MMP_CLK_MIX_TYPE_V1
) {
167 writel(mux_div
, ri
->reg_clk_ctrl
);
168 } else if (mix
->type
== MMP_CLK_MIX_TYPE_V2
) {
169 mux_div
|= (1 << ri
->bit_fc
);
170 writel(mux_div
, ri
->reg_clk_ctrl
);
173 fc_req
= readl(ri
->reg_clk_ctrl
);
175 if (!(fc_req
& (1 << ri
->bit_fc
)))
180 pr_err("%s:%s cannot do frequency change\n",
181 __func__
, clk_hw_get_name(&mix
->hw
));
186 fc_req
= readl(ri
->reg_clk_ctrl
);
187 fc_req
|= 1 << ri
->bit_fc
;
188 writel(fc_req
, ri
->reg_clk_ctrl
);
189 writel(mux_div
, ri
->reg_clk_sel
);
190 fc_req
&= ~(1 << ri
->bit_fc
);
196 spin_unlock_irqrestore(mix
->lock
, flags
);
201 static int mmp_clk_mix_determine_rate(struct clk_hw
*hw
,
202 struct clk_rate_request
*req
)
204 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
205 struct mmp_clk_mix_clk_table
*item
;
206 struct clk_hw
*parent
, *parent_best
;
207 unsigned long parent_rate
, mix_rate
, mix_rate_best
, parent_rate_best
;
208 unsigned long gap
, gap_best
;
215 parent_rate_best
= 0;
216 gap_best
= ULONG_MAX
;
220 for (i
= 0; i
< mix
->table_size
; i
++) {
221 item
= &mix
->table
[i
];
222 if (item
->valid
== 0)
224 parent
= clk_hw_get_parent_by_index(hw
,
226 parent_rate
= clk_hw_get_rate(parent
);
227 mix_rate
= parent_rate
/ item
->divisor
;
228 gap
= abs(mix_rate
- req
->rate
);
229 if (!parent_best
|| gap
< gap_best
) {
230 parent_best
= parent
;
231 parent_rate_best
= parent_rate
;
232 mix_rate_best
= mix_rate
;
239 for (i
= 0; i
< clk_hw_get_num_parents(hw
); i
++) {
240 parent
= clk_hw_get_parent_by_index(hw
, i
);
241 parent_rate
= clk_hw_get_rate(parent
);
242 div_val_max
= _get_maxdiv(mix
);
243 for (j
= 0; j
< div_val_max
; j
++) {
244 div
= _get_div(mix
, j
);
245 mix_rate
= parent_rate
/ div
;
246 gap
= abs(mix_rate
- req
->rate
);
247 if (!parent_best
|| gap
< gap_best
) {
248 parent_best
= parent
;
249 parent_rate_best
= parent_rate
;
250 mix_rate_best
= mix_rate
;
263 req
->best_parent_rate
= parent_rate_best
;
264 req
->best_parent_hw
= parent_best
;
265 req
->rate
= mix_rate_best
;
270 static int mmp_clk_mix_set_rate_and_parent(struct clk_hw
*hw
,
272 unsigned long parent_rate
,
275 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
277 u32 div_val
, mux_val
;
279 div
= parent_rate
/ rate
;
280 div_val
= _get_div_val(mix
, div
);
281 mux_val
= _get_mux_val(mix
, index
);
283 return _set_rate(mix
, mux_val
, div_val
, 1, 1);
286 static u8
mmp_clk_mix_get_parent(struct clk_hw
*hw
)
288 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
289 struct mmp_clk_mix_reg_info
*ri
= &mix
->reg_info
;
290 unsigned long flags
= 0;
296 spin_lock_irqsave(mix
->lock
, flags
);
298 if (mix
->type
== MMP_CLK_MIX_TYPE_V1
299 || mix
->type
== MMP_CLK_MIX_TYPE_V2
)
300 mux_div
= readl(ri
->reg_clk_ctrl
);
302 mux_div
= readl(ri
->reg_clk_sel
);
305 spin_unlock_irqrestore(mix
->lock
, flags
);
307 width
= mix
->reg_info
.width_mux
;
308 shift
= mix
->reg_info
.shift_mux
;
310 mux_val
= MMP_CLK_BITS_GET_VAL(mux_div
, width
, shift
);
312 return _get_mux(mix
, mux_val
);
315 static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw
*hw
,
316 unsigned long parent_rate
)
318 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
319 struct mmp_clk_mix_reg_info
*ri
= &mix
->reg_info
;
320 unsigned long flags
= 0;
326 spin_lock_irqsave(mix
->lock
, flags
);
328 if (mix
->type
== MMP_CLK_MIX_TYPE_V1
329 || mix
->type
== MMP_CLK_MIX_TYPE_V2
)
330 mux_div
= readl(ri
->reg_clk_ctrl
);
332 mux_div
= readl(ri
->reg_clk_sel
);
335 spin_unlock_irqrestore(mix
->lock
, flags
);
337 width
= mix
->reg_info
.width_div
;
338 shift
= mix
->reg_info
.shift_div
;
340 div
= _get_div(mix
, MMP_CLK_BITS_GET_VAL(mux_div
, width
, shift
));
342 return parent_rate
/ div
;
345 static int mmp_clk_set_parent(struct clk_hw
*hw
, u8 index
)
347 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
348 struct mmp_clk_mix_clk_table
*item
;
350 u32 div_val
, mux_val
;
353 for (i
= 0; i
< mix
->table_size
; i
++) {
354 item
= &mix
->table
[i
];
355 if (item
->valid
== 0)
357 if (item
->parent_index
== index
)
360 if (i
< mix
->table_size
) {
361 div_val
= _get_div_val(mix
, item
->divisor
);
362 mux_val
= _get_mux_val(mix
, item
->parent_index
);
366 mux_val
= _get_mux_val(mix
, index
);
370 return _set_rate(mix
, mux_val
, div_val
, 1, div_val
? 1 : 0);
373 static int mmp_clk_set_rate(struct clk_hw
*hw
, unsigned long rate
,
374 unsigned long best_parent_rate
)
376 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
377 struct mmp_clk_mix_clk_table
*item
;
378 unsigned long parent_rate
;
379 unsigned int best_divisor
;
380 struct clk_hw
*parent
;
383 best_divisor
= best_parent_rate
/ rate
;
386 for (i
= 0; i
< mix
->table_size
; i
++) {
387 item
= &mix
->table
[i
];
388 if (item
->valid
== 0)
390 parent
= clk_hw_get_parent_by_index(hw
,
392 parent_rate
= clk_hw_get_rate(parent
);
393 if (parent_rate
== best_parent_rate
394 && item
->divisor
== best_divisor
)
397 if (i
< mix
->table_size
)
398 return _set_rate(mix
,
399 _get_mux_val(mix
, item
->parent_index
),
400 _get_div_val(mix
, item
->divisor
),
405 for (i
= 0; i
< clk_hw_get_num_parents(hw
); i
++) {
406 parent
= clk_hw_get_parent_by_index(hw
, i
);
407 parent_rate
= clk_hw_get_rate(parent
);
408 if (parent_rate
== best_parent_rate
)
411 if (i
< clk_hw_get_num_parents(hw
))
412 return _set_rate(mix
, _get_mux_val(mix
, i
),
413 _get_div_val(mix
, best_divisor
), 1, 1);
419 static int mmp_clk_mix_init(struct clk_hw
*hw
)
421 struct mmp_clk_mix
*mix
= to_clk_mix(hw
);
424 _filter_clk_table(mix
, mix
->table
, mix
->table_size
);
429 const struct clk_ops mmp_clk_mix_ops
= {
430 .determine_rate
= mmp_clk_mix_determine_rate
,
431 .set_rate_and_parent
= mmp_clk_mix_set_rate_and_parent
,
432 .set_rate
= mmp_clk_set_rate
,
433 .set_parent
= mmp_clk_set_parent
,
434 .get_parent
= mmp_clk_mix_get_parent
,
435 .recalc_rate
= mmp_clk_mix_recalc_rate
,
436 .init
= mmp_clk_mix_init
,
439 struct clk
*mmp_clk_register_mix(struct device
*dev
,
441 const char * const *parent_names
,
444 struct mmp_clk_mix_config
*config
,
447 struct mmp_clk_mix
*mix
;
449 struct clk_init_data init
;
451 mix
= kzalloc(sizeof(*mix
), GFP_KERNEL
);
453 return ERR_PTR(-ENOMEM
);
456 init
.flags
= flags
| CLK_GET_RATE_NOCACHE
;
457 init
.parent_names
= parent_names
;
458 init
.num_parents
= num_parents
;
459 init
.ops
= &mmp_clk_mix_ops
;
461 memcpy(&mix
->reg_info
, &config
->reg_info
, sizeof(config
->reg_info
));
463 mix
->table
= kmemdup_array(config
->table
, config
->table_size
,
464 sizeof(*mix
->table
), GFP_KERNEL
);
468 mix
->table_size
= config
->table_size
;
471 if (config
->mux_table
) {
472 mix
->mux_table
= kmemdup_array(config
->mux_table
, num_parents
,
473 sizeof(*mix
->mux_table
), GFP_KERNEL
);
474 if (!mix
->mux_table
) {
480 mix
->div_flags
= config
->div_flags
;
481 mix
->mux_flags
= config
->mux_flags
;
483 mix
->hw
.init
= &init
;
485 if (config
->reg_info
.bit_fc
>= 32)
486 mix
->type
= MMP_CLK_MIX_TYPE_V1
;
487 else if (config
->reg_info
.reg_clk_sel
)
488 mix
->type
= MMP_CLK_MIX_TYPE_V3
;
490 mix
->type
= MMP_CLK_MIX_TYPE_V2
;
491 clk
= clk_register(dev
, &mix
->hw
);
494 kfree(mix
->mux_table
);
503 return ERR_PTR(-ENOMEM
);