// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/io.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include <linux/rational.h>

#include "clk.h"

/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base,
		int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
		int div_offset, u8 div_shift, u8 div_width, u8 div_flags,
		struct clk_div_table *div_table, int gate_offset,
		u8 gate_shift, u8 gate_flags, unsigned long flags,
		spinlock_t *lock)
{
	struct clk *clk;
	struct clk_mux *mux = NULL;
	struct clk_gate *gate = NULL;
	struct clk_divider *div = NULL;
	const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
			     *gate_ops = NULL;
	int ret;

	if (num_parents > 1) {
		mux = kzalloc(sizeof(*mux), GFP_KERNEL);
		if (!mux)
			return ERR_PTR(-ENOMEM);

		mux->reg = base + muxdiv_offset;
		mux->shift = mux_shift;
		mux->mask = BIT(mux_width) - 1;
		mux->flags = mux_flags;
		mux->lock = lock;
		mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
							  : &clk_mux_ops;
	}

	if (gate_offset >= 0) {
		gate = kzalloc(sizeof(*gate), GFP_KERNEL);
		if (!gate) {
			ret = -ENOMEM;
			goto err_gate;
		}

		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	if (div_width > 0) {
		div = kzalloc(sizeof(*div), GFP_KERNEL);
		if (!div) {
			ret = -ENOMEM;
			goto err_div;
		}

		div->flags = div_flags;
		if (div_offset)
			div->reg = base + div_offset;
		else
			div->reg = base + muxdiv_offset;
		div->shift = div_shift;
		div->width = div_width;
		div->lock = lock;
		div->table = div_table;
		div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
						? &clk_divider_ro_ops
						: &clk_divider_ops;
	}

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     mux ? &mux->hw : NULL, mux_ops,
				     div ? &div->hw : NULL, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags);
	if (IS_ERR(clk)) {
		ret = PTR_ERR(clk);
		goto err_composite;
	}

	return clk;

err_composite:
	kfree(div);
err_div:
	kfree(gate);
err_gate:
	kfree(mux);
	return ERR_PTR(ret);
}

struct rockchip_clk_frac {
	struct notifier_block			clk_nb;
	struct clk_fractional_divider		div;
	struct clk_gate				gate;

	struct clk_mux				mux;
	const struct clk_ops			*mux_ops;
	int					mux_frac_idx;

	bool					rate_change_remuxed;
	int					rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)

static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}

/*
 * The fractional divider must have a denominator at least 20 times larger
 * than the numerator to generate a precise clock frequency.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}

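/*
 * Worked example (numbers chosen for illustration): with a 500 MHz parent
 * and a requested rate of 12.5 MHz, the exact ratio is 1/40, so
 * rational_best_approximation() returns m = 1 and n = 40 and the output is
 * 500000000 * 1 / 40 = 12500000 Hz. Since 12.5 MHz * 20 does not exceed
 * 500 MHz, the "denominator at least 20 times the numerator" requirement
 * above is met and the parent rate is left unchanged.
 */
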
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int ret;

		frac->mux_frac_idx = match_string(child->parent_names,
						  child->num_parents, name);
		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk)) {
			kfree(frac);
			return mux_clk;
		}

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			pr_debug("%s: found fractional parent in mux at pos %d\n",
				 __func__, frac->mux_frac_idx);
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}

static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult, div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &fix->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		kfree(fix);
		kfree(gate);
	}

	return clk;
}

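/*
 * Illustrative use only: a fixed-factor branch with a gate is normally
 * declared with the FACTOR_GATE() helper from clk.h, e.g. a divide-by-4
 * clock gated in some gate register (names and offsets made up):
 *
 *	FACTOR_GATE(0, "hclk_example", "aclk_example", 0, 1, 4,
 *			RK3288_CLKGATE_CON(12), 4, GFLAGS),
 */
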
struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
			void __iomem *base, unsigned long nr_clks)
{
	struct rockchip_clk_provider *ctx;
	struct clk **clk_table;
	int i;

	ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
	if (!ctx)
		return ERR_PTR(-ENOMEM);

	clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
	if (!clk_table)
		goto err_free;

	for (i = 0; i < nr_clks; ++i)
		clk_table[i] = ERR_PTR(-ENOENT);

	ctx->reg_base = base;
	ctx->clk_data.clks = clk_table;
	ctx->clk_data.clk_num = nr_clks;
	ctx->cru_node = np;
	spin_lock_init(&ctx->lock);

	ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
						   "rockchip,grf");

	return ctx;

err_free:
	kfree(ctx);
	return ERR_PTR(-ENOMEM);
}

void __init rockchip_clk_of_add_provider(struct device_node *np,
					 struct rockchip_clk_provider *ctx)
{
	if (of_clk_add_provider(np, of_clk_src_onecell_get,
				&ctx->clk_data))
		pr_err("%s: could not register clk provider\n", __func__);
}

void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
			     struct clk *clk, unsigned int id)
{
	if (ctx->clk_data.clks && id)
		ctx->clk_data.clks[id] = clk;
}

void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
				       struct rockchip_pll_clock *list,
				       unsigned int nr_pll, int grf_lock_offset)
{
	struct clk *clk;
	unsigned int idx;

	for (idx = 0; idx < nr_pll; idx++, list++) {
		clk = rockchip_clk_register_pll(ctx, list->type, list->name,
				list->parent_names, list->num_parents,
				list->con_offset, grf_lock_offset,
				list->lock_shift, list->mode_offset,
				list->mode_shift, list->rate_table,
				list->flags, list->pll_flags);
		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s\n", __func__,
			       list->name);
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}

void __init rockchip_clk_register_branches(struct rockchip_clk_provider *ctx,
					   struct rockchip_clk_branch *list,
					   unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_muxgrf:
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, ctx->grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_half_divider:
			clk = rockchip_clk_register_halfdiv(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, list->div_shift,
				list->div_width, list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_gate:
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_offset, list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}

void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
			unsigned int lookup_id,
			const char *name, const char *const *parent_names,
			u8 num_parents,
			const struct rockchip_cpuclk_reg_data *reg_data,
			const struct rockchip_cpuclk_rate_table *rates,
			int nrates)
{
	struct clk *clk;

	clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
					   reg_data, rates, nrates,
					   ctx->reg_base, &ctx->lock);
	if (IS_ERR(clk)) {
		pr_err("%s: failed to register clock %s: %ld\n",
		       __func__, name, PTR_ERR(clk));
		return;
	}

	rockchip_clk_add_lookup(ctx, clk, lookup_id);
}

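/*
 * Illustrative use only: the armclk is usually registered from the SoC init
 * function with its cpuclk register description and rate table (the names
 * below are placeholders):
 *
 *	rockchip_clk_register_armclk(ctx, ARMCLK, "armclk",
 *			mux_armclk_p, ARRAY_SIZE(mux_armclk_p),
 *			&example_cpuclk_data, example_cpuclk_rates,
 *			ARRAY_SIZE(example_cpuclk_rates));
 */
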
void __init rockchip_clk_protect_critical(const char *const clocks[],
					  int nclocks)
{
	int i;

	/* Protect the clocks that need to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (clk)
			clk_prepare_enable(clk);
	}
}

static void __iomem *rst_base;
static unsigned int reg_restart;
static void (*cb_restart)(void);
static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};

void __init
rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
				   unsigned int reg,
				   void (*cb)(void))
{
	int ret;

	rst_base = ctx->reg_base;
	reg_restart = reg;
	cb_restart = cb;
	ret = register_restart_handler(&rockchip_restart_handler);
	if (ret)
		pr_err("%s: cannot register restart handler, %d\n",
		       __func__, ret);
}

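/*
 * Illustrative use only: SoC drivers call this at the end of their clock
 * init with the global soft-reset register offset, e.g.
 *
 *	rockchip_register_restart_notifier(ctx, RK3288_GLB_SRST_FST, NULL);
 *
 * where RK3288_GLB_SRST_FST is that SoC's first global soft-reset register
 * as defined in clk.h.
 */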