/*
 * mmp mix(div and mux) clock operation source file
 *
 * Copyright (C) 2014 Marvell
 * Chao Xie <chao.xie@marvell.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/err.h>

#include "clk.h"

/*
 * The mix clock combines a mux and a divider in one clock.  Because the
 * div field and the mux field must be programmed at the same time, it
 * cannot be split into two separate clock types.
 */

#define to_clk_mix(hw)  container_of(hw, struct mmp_clk_mix, hw)
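
/*
 * Illustrative sketch (not part of this driver): a platform clock driver
 * would normally describe where the mux and div fields live in a
 * mmp_clk_mix_config and then call mmp_clk_register_mix().  The register
 * offset, field widths/shifts and parent names below are invented for the
 * example.
 *
 *      static const char *sdh_parents[] = { "pll1_416", "pll1_624" };
 *      static DEFINE_SPINLOCK(sdh_lock);
 *      static struct mmp_clk_mix_config sdh_mix_config = {
 *              .reg_info = {
 *                      .reg_clk_ctrl = apmu_base + 0x54,
 *                      .width_div = 3,
 *                      .shift_div = 10,
 *                      .width_mux = 2,
 *                      .shift_mux = 8,
 *                      .bit_fc = 32,  (a value >= 32 selects MMP_CLK_MIX_TYPE_V1)
 *              },
 *      };
 *
 *      clk = mmp_clk_register_mix(NULL, "sdh_mix", sdh_parents,
 *                                 ARRAY_SIZE(sdh_parents), 0,
 *                                 &sdh_mix_config, &sdh_lock);
 */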

/* Largest divisor representable by the div field (or listed in the div table). */
static unsigned int _get_maxdiv(struct mmp_clk_mix *mix)
{
        unsigned int div_mask = (1 << mix->reg_info.width_div) - 1;
        unsigned int maxdiv = 0;
        struct clk_div_table *clkt;

        if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
                return div_mask;
        if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
                return 1 << div_mask;
        if (mix->div_table) {
                for (clkt = mix->div_table; clkt->div; clkt++)
                        if (clkt->div > maxdiv)
                                maxdiv = clkt->div;
                return maxdiv;
        }

        return div_mask + 1;
}
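
/* Translate the raw divider field value read from hardware into the actual divisor. */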
static unsigned int _get_div(struct mmp_clk_mix *mix, unsigned int val)
{
        struct clk_div_table *clkt;

        if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
                return val;
        if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
                return 1 << val;
        if (mix->div_table) {
                for (clkt = mix->div_table; clkt->div; clkt++)
                        if (clkt->val == val)
                                return clkt->div;
                return 0;
        }

        return val + 1;
}
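
/* Translate the raw mux field value into the parent index used by the clock framework. */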
static unsigned int _get_mux(struct mmp_clk_mix *mix, unsigned int val)
{
        int num_parents = __clk_get_num_parents(mix->hw.clk);
        int i;

        if (mix->mux_flags & CLK_MUX_INDEX_BIT)
                return ffs(val) - 1;
        if (mix->mux_flags & CLK_MUX_INDEX_ONE)
                return val - 1;
        if (mix->mux_table) {
                for (i = 0; i < num_parents; i++)
                        if (mix->mux_table[i] == val)
                                return i;
                return 0;
        }

        return val;
}
static unsigned int _get_div_val(struct mmp_clk_mix *mix, unsigned int div)
{
        struct clk_div_table *clkt;

        if (mix->div_flags & CLK_DIVIDER_ONE_BASED)
                return div;
        if (mix->div_flags & CLK_DIVIDER_POWER_OF_TWO)
                return ffs(div) - 1;
        if (mix->div_table) {
                for (clkt = mix->div_table; clkt->div; clkt++)
                        if (clkt->div == div)
                                return clkt->val;
                return 0;
        }

        return div - 1;
}
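
/* Inverse of _get_mux(): compute the raw register field value for a parent index. */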
static unsigned int _get_mux_val(struct mmp_clk_mix *mix, unsigned int mux)
{
        if (mix->mux_table)
                return mix->mux_table[mux];

        return mux;
}
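
/*
 * Mark a table entry valid only if its parent rate is an exact multiple of
 * the requested rate, and cache the resulting divisor.
 */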
static void _filter_clk_table(struct mmp_clk_mix *mix,
                                struct mmp_clk_mix_clk_table *table,
                                unsigned int table_size)
{
        int i;
        struct mmp_clk_mix_clk_table *item;
        struct clk *parent, *clk;
        unsigned long parent_rate;

        clk = mix->hw.clk;

        for (i = 0; i < table_size; i++) {
                item = &table[i];
                parent = clk_get_parent_by_index(clk, item->parent_index);
                parent_rate = __clk_get_rate(parent);
                if (parent_rate % item->rate) {
                        item->valid = 0;
                } else {
                        item->divisor = parent_rate / item->rate;
                        item->valid = 1;
                }
        }
}
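
/*
 * Program the mux and/or div fields.  V1 hardware takes a plain register
 * write, V2 hardware needs a frequency-change request bit that is polled
 * until the hardware clears it, and V3 hardware writes the new value to a
 * separate select register after raising the frequency-change bit in the
 * control register.  Protected by mix->lock when one was supplied at
 * registration.
 */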
static int _set_rate(struct mmp_clk_mix *mix, u32 mux_val, u32 div_val,
                        unsigned int change_mux, unsigned int change_div)
{
        struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
        u8 width, shift;
        u32 mux_div, fc_req;
        int ret, timeout = 50;
        unsigned long flags = 0;

        if (!change_mux && !change_div)
                return -EINVAL;

        if (mix->lock)
                spin_lock_irqsave(mix->lock, flags);

        if (mix->type == MMP_CLK_MIX_TYPE_V1
                || mix->type == MMP_CLK_MIX_TYPE_V2)
                mux_div = readl(ri->reg_clk_ctrl);
        else
                mux_div = readl(ri->reg_clk_sel);

        if (change_div) {
                width = ri->width_div;
                shift = ri->shift_div;
                mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
                mux_div |= MMP_CLK_BITS_SET_VAL(div_val, width, shift);
        }

        if (change_mux) {
                width = ri->width_mux;
                shift = ri->shift_mux;
                mux_div &= ~MMP_CLK_BITS_MASK(width, shift);
                mux_div |= MMP_CLK_BITS_SET_VAL(mux_val, width, shift);
        }

        if (mix->type == MMP_CLK_MIX_TYPE_V1) {
                writel(mux_div, ri->reg_clk_ctrl);
        } else if (mix->type == MMP_CLK_MIX_TYPE_V2) {
                /* Request the frequency change and wait for the hardware
                 * to clear the request bit. */
                mux_div |= (1 << ri->bit_fc);
                writel(mux_div, ri->reg_clk_ctrl);

                do {
                        fc_req = readl(ri->reg_clk_ctrl);
                        timeout--;
                        if (!(fc_req & (1 << ri->bit_fc)))
                                break;
                } while (timeout);

                if (timeout == 0) {
                        pr_err("%s:%s cannot do frequency change\n",
                                __func__, __clk_get_name(mix->hw.clk));
                        ret = -EBUSY;
                        goto error;
                }
        } else {
                fc_req = readl(ri->reg_clk_ctrl);
                fc_req |= 1 << ri->bit_fc;
                writel(fc_req, ri->reg_clk_ctrl);
                writel(mux_div, ri->reg_clk_sel);
                fc_req &= ~(1 << ri->bit_fc);
        }

        ret = 0;
error:
        if (mix->lock)
                spin_unlock_irqrestore(mix->lock, flags);

        return ret;
}
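
/*
 * Pick the parent/divisor combination whose output rate is closest to the
 * requested rate, using the pre-filtered table when one exists and
 * otherwise scanning every parent with every possible divider value.
 */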
static long mmp_clk_mix_determine_rate(struct clk_hw *hw, unsigned long rate,
                                        unsigned long min_rate,
                                        unsigned long max_rate,
                                        unsigned long *best_parent_rate,
                                        struct clk_hw **best_parent_clk)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        struct mmp_clk_mix_clk_table *item;
        struct clk *parent, *parent_best, *mix_clk;
        unsigned long parent_rate, mix_rate, mix_rate_best, parent_rate_best;
        unsigned long gap, gap_best;
        u32 div_val_max;
        unsigned int div;
        int i, j;

        mix_clk = hw->clk;

        parent = NULL;
        mix_rate_best = 0;
        parent_rate_best = 0;
        gap_best = rate;
        parent_best = NULL;

        if (mix->table) {
                for (i = 0; i < mix->table_size; i++) {
                        item = &mix->table[i];
                        if (item->valid == 0)
                                continue;
                        parent = clk_get_parent_by_index(mix_clk,
                                                        item->parent_index);
                        parent_rate = __clk_get_rate(parent);
                        mix_rate = parent_rate / item->divisor;
                        gap = abs(mix_rate - rate);
                        if (parent_best == NULL || gap < gap_best) {
                                parent_best = parent;
                                parent_rate_best = parent_rate;
                                mix_rate_best = mix_rate;
                                gap_best = gap;
                        }
                }
        } else {
                for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
                        parent = clk_get_parent_by_index(mix_clk, i);
                        parent_rate = __clk_get_rate(parent);
                        div_val_max = _get_maxdiv(mix);
                        for (j = 0; j < div_val_max; j++) {
                                div = _get_div(mix, j);
                                mix_rate = parent_rate / div;
                                gap = abs(mix_rate - rate);
                                if (parent_best == NULL || gap < gap_best) {
                                        parent_best = parent;
                                        parent_rate_best = parent_rate;
                                        mix_rate_best = mix_rate;
                                        gap_best = gap;
                                }
                        }
                }
        }

        if (!parent_best)
                return -EINVAL;

        *best_parent_rate = parent_rate_best;
        *best_parent_clk = __clk_get_hw(parent_best);

        return mix_rate_best;
}
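
/* Switch parent and divider in one register update; the divider is derived from parent_rate / rate. */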
static int mmp_clk_mix_set_rate_and_parent(struct clk_hw *hw,
                                                unsigned long rate,
                                                unsigned long parent_rate,
                                                u8 index)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        unsigned int div;
        u32 div_val, mux_val;

        div = parent_rate / rate;
        div_val = _get_div_val(mix, div);
        mux_val = _get_mux_val(mix, index);

        return _set_rate(mix, mux_val, div_val, 1, 1);
}
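
/* Read the currently selected parent index back from hardware. */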
static u8 mmp_clk_mix_get_parent(struct clk_hw *hw)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
        unsigned long flags = 0;
        u32 mux_div = 0;
        u8 width, shift;
        u32 mux_val;

        if (mix->lock)
                spin_lock_irqsave(mix->lock, flags);

        if (mix->type == MMP_CLK_MIX_TYPE_V1
                || mix->type == MMP_CLK_MIX_TYPE_V2)
                mux_div = readl(ri->reg_clk_ctrl);
        else
                mux_div = readl(ri->reg_clk_sel);

        if (mix->lock)
                spin_unlock_irqrestore(mix->lock, flags);

        width = mix->reg_info.width_mux;
        shift = mix->reg_info.shift_mux;

        mux_val = MMP_CLK_BITS_GET_VAL(mux_div, width, shift);

        return _get_mux(mix, mux_val);
}
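
/* Derive the output rate from the divider field currently programmed in hardware. */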
static unsigned long mmp_clk_mix_recalc_rate(struct clk_hw *hw,
                                        unsigned long parent_rate)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        struct mmp_clk_mix_reg_info *ri = &mix->reg_info;
        unsigned long flags = 0;
        u32 mux_div = 0;
        u8 width, shift;
        unsigned int div;

        if (mix->lock)
                spin_lock_irqsave(mix->lock, flags);

        if (mix->type == MMP_CLK_MIX_TYPE_V1
                || mix->type == MMP_CLK_MIX_TYPE_V2)
                mux_div = readl(ri->reg_clk_ctrl);
        else
                mux_div = readl(ri->reg_clk_sel);

        if (mix->lock)
                spin_unlock_irqrestore(mix->lock, flags);

        width = mix->reg_info.width_div;
        shift = mix->reg_info.shift_div;

        div = _get_div(mix, MMP_CLK_BITS_GET_VAL(mux_div, width, shift));

        return parent_rate / div;
}
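
/* Change only the parent; when a rate table exists, reuse its pre-computed divisor for that parent. */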
static int mmp_clk_set_parent(struct clk_hw *hw, u8 index)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        struct mmp_clk_mix_clk_table *item;
        int i;
        u32 div_val, mux_val;

        if (mix->table) {
                for (i = 0; i < mix->table_size; i++) {
                        item = &mix->table[i];
                        if (item->valid == 0)
                                continue;
                        if (item->parent_index == index)
                                break;
                }
                if (i < mix->table_size) {
                        div_val = _get_div_val(mix, item->divisor);
                        mux_val = _get_mux_val(mix, item->parent_index);
                } else {
                        return -EINVAL;
                }
        } else {
                mux_val = _get_mux_val(mix, index);
                div_val = 0;
        }

        return _set_rate(mix, mux_val, div_val, 1, div_val ? 1 : 0);
}
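
/*
 * Apply the rate chosen by determine_rate(), matching it against the table
 * first and otherwise locating the parent that produced best_parent_rate.
 */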
static int mmp_clk_set_rate(struct clk_hw *hw, unsigned long rate,
                                unsigned long best_parent_rate)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);
        struct mmp_clk_mix_clk_table *item;
        unsigned long parent_rate;
        unsigned int best_divisor;
        struct clk *mix_clk, *parent;
        int i;

        best_divisor = best_parent_rate / rate;

        mix_clk = hw->clk;
        if (mix->table) {
                for (i = 0; i < mix->table_size; i++) {
                        item = &mix->table[i];
                        if (item->valid == 0)
                                continue;
                        parent = clk_get_parent_by_index(mix_clk,
                                                        item->parent_index);
                        parent_rate = __clk_get_rate(parent);
                        if (parent_rate == best_parent_rate
                                && item->divisor == best_divisor)
                                break;
                }
                if (i < mix->table_size)
                        return _set_rate(mix,
                                        _get_mux_val(mix, item->parent_index),
                                        _get_div_val(mix, item->divisor),
                                        1, 1);
                else
                        return -EPERM;
        } else {
                for (i = 0; i < __clk_get_num_parents(mix_clk); i++) {
                        parent = clk_get_parent_by_index(mix_clk, i);
                        parent_rate = __clk_get_rate(parent);
                        if (parent_rate == best_parent_rate)
                                break;
                }
                if (i < __clk_get_num_parents(mix_clk))
                        return _set_rate(mix, _get_mux_val(mix, i),
                                        _get_div_val(mix, best_divisor), 1, 1);
                else
                        return -EPERM;
        }
}
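
/* Called once at registration time to validate the platform rate table. */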
static void mmp_clk_mix_init(struct clk_hw *hw)
{
        struct mmp_clk_mix *mix = to_clk_mix(hw);

        if (mix->table)
                _filter_clk_table(mix, mix->table, mix->table_size);
}

const struct clk_ops mmp_clk_mix_ops = {
        .determine_rate = mmp_clk_mix_determine_rate,
        .set_rate_and_parent = mmp_clk_mix_set_rate_and_parent,
        .set_rate = mmp_clk_set_rate,
        .set_parent = mmp_clk_set_parent,
        .get_parent = mmp_clk_mix_get_parent,
        .recalc_rate = mmp_clk_mix_recalc_rate,
        .init = mmp_clk_mix_init,
};
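
/*
 * Allocate a mix clock described by @config and register it with the common
 * clock framework.  The register layout in config->reg_info selects the
 * hardware type: no usable frequency-change bit means V1, a separate clk_sel
 * register means V3, otherwise V2.
 */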
struct clk *mmp_clk_register_mix(struct device *dev,
                                        const char *name,
                                        const char **parent_names,
                                        u8 num_parents,
                                        unsigned long flags,
                                        struct mmp_clk_mix_config *config,
                                        spinlock_t *lock)
{
        struct mmp_clk_mix *mix;
        struct clk *clk;
        struct clk_init_data init;
        size_t table_bytes;

        mix = kzalloc(sizeof(*mix), GFP_KERNEL);
        if (!mix) {
                pr_err("%s:%s: could not allocate mmp mix clk\n",
                        __func__, name);
                return ERR_PTR(-ENOMEM);
        }

        init.name = name;
        init.flags = flags | CLK_GET_RATE_NOCACHE;
        init.parent_names = parent_names;
        init.num_parents = num_parents;
        init.ops = &mmp_clk_mix_ops;

        memcpy(&mix->reg_info, &config->reg_info, sizeof(config->reg_info));
        if (config->table) {
                table_bytes = sizeof(*config->table) * config->table_size;
                mix->table = kzalloc(table_bytes, GFP_KERNEL);
                if (!mix->table) {
                        pr_err("%s:%s: could not allocate mmp mix table\n",
                                __func__, name);
                        kfree(mix);
                        return ERR_PTR(-ENOMEM);
                }
                memcpy(mix->table, config->table, table_bytes);
                mix->table_size = config->table_size;
        }

        if (config->mux_table) {
                table_bytes = sizeof(u32) * num_parents;
                mix->mux_table = kzalloc(table_bytes, GFP_KERNEL);
                if (!mix->mux_table) {
                        pr_err("%s:%s: could not allocate mmp mix mux-table\n",
                                __func__, name);
                        kfree(mix->table);
                        kfree(mix);
                        return ERR_PTR(-ENOMEM);
                }
                memcpy(mix->mux_table, config->mux_table, table_bytes);
        }

        mix->div_flags = config->div_flags;
        mix->mux_flags = config->mux_flags;
        mix->lock = lock;
        mix->hw.init = &init;

        if (config->reg_info.bit_fc >= 32)
                mix->type = MMP_CLK_MIX_TYPE_V1;
        else if (config->reg_info.reg_clk_sel)
                mix->type = MMP_CLK_MIX_TYPE_V3;
        else
                mix->type = MMP_CLK_MIX_TYPE_V2;
        clk = clk_register(dev, &mix->hw);
        if (IS_ERR(clk)) {
                kfree(mix->mux_table);
                kfree(mix->table);
                kfree(mix);
        }

        return clk;
}