// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
 * Author: Xing Zheng <zhengxing@rock-chips.com>
 *
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 */
17 #include <linux/slab.h>
18 #include <linux/clk.h>
19 #include <linux/clk-provider.h>
21 #include <linux/mfd/syscon.h>
22 #include <linux/regmap.h>
23 #include <linux/reboot.h>
24 #include <linux/rational.h>
/*
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
37 static struct clk
*rockchip_clk_register_branch(const char *name
,
38 const char *const *parent_names
, u8 num_parents
,
40 int muxdiv_offset
, u8 mux_shift
, u8 mux_width
, u8 mux_flags
,
41 int div_offset
, u8 div_shift
, u8 div_width
, u8 div_flags
,
42 struct clk_div_table
*div_table
, int gate_offset
,
43 u8 gate_shift
, u8 gate_flags
, unsigned long flags
,
47 struct clk_mux
*mux
= NULL
;
48 struct clk_gate
*gate
= NULL
;
49 struct clk_divider
*div
= NULL
;
50 const struct clk_ops
*mux_ops
= NULL
, *div_ops
= NULL
,
54 if (num_parents
> 1) {
55 mux
= kzalloc(sizeof(*mux
), GFP_KERNEL
);
57 return ERR_PTR(-ENOMEM
);
59 mux
->reg
= base
+ muxdiv_offset
;
60 mux
->shift
= mux_shift
;
61 mux
->mask
= BIT(mux_width
) - 1;
62 mux
->flags
= mux_flags
;
64 mux_ops
= (mux_flags
& CLK_MUX_READ_ONLY
) ? &clk_mux_ro_ops
68 if (gate_offset
>= 0) {
69 gate
= kzalloc(sizeof(*gate
), GFP_KERNEL
);
75 gate
->flags
= gate_flags
;
76 gate
->reg
= base
+ gate_offset
;
77 gate
->bit_idx
= gate_shift
;
79 gate_ops
= &clk_gate_ops
;
83 div
= kzalloc(sizeof(*div
), GFP_KERNEL
);
89 div
->flags
= div_flags
;
91 div
->reg
= base
+ div_offset
;
93 div
->reg
= base
+ muxdiv_offset
;
94 div
->shift
= div_shift
;
95 div
->width
= div_width
;
97 div
->table
= div_table
;
98 div_ops
= (div_flags
& CLK_DIVIDER_READ_ONLY
)
103 hw
= clk_hw_register_composite(NULL
, name
, parent_names
, num_parents
,
104 mux
? &mux
->hw
: NULL
, mux_ops
,
105 div
? &div
->hw
: NULL
, div_ops
,
106 gate
? &gate
->hw
: NULL
, gate_ops
,
122 struct rockchip_clk_frac
{
123 struct notifier_block clk_nb
;
124 struct clk_fractional_divider div
;
125 struct clk_gate gate
;
128 const struct clk_ops
*mux_ops
;
131 bool rate_change_remuxed
;
135 #define to_rockchip_clk_frac_nb(nb) \
136 container_of(nb, struct rockchip_clk_frac, clk_nb)
138 static int rockchip_clk_frac_notifier_cb(struct notifier_block
*nb
,
139 unsigned long event
, void *data
)
141 struct clk_notifier_data
*ndata
= data
;
142 struct rockchip_clk_frac
*frac
= to_rockchip_clk_frac_nb(nb
);
143 struct clk_mux
*frac_mux
= &frac
->mux
;
146 pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
147 __func__
, event
, ndata
->old_rate
, ndata
->new_rate
);
148 if (event
== PRE_RATE_CHANGE
) {
149 frac
->rate_change_idx
=
150 frac
->mux_ops
->get_parent(&frac_mux
->hw
);
151 if (frac
->rate_change_idx
!= frac
->mux_frac_idx
) {
152 frac
->mux_ops
->set_parent(&frac_mux
->hw
,
154 frac
->rate_change_remuxed
= 1;
156 } else if (event
== POST_RATE_CHANGE
) {
158 * The POST_RATE_CHANGE notifier runs directly after the
159 * divider clock is set in clk_change_rate, so we'll have
160 * remuxed back to the original parent before clk_change_rate
161 * reaches the mux itself.
163 if (frac
->rate_change_remuxed
) {
164 frac
->mux_ops
->set_parent(&frac_mux
->hw
,
165 frac
->rate_change_idx
);
166 frac
->rate_change_remuxed
= 0;
170 return notifier_from_errno(ret
);
174 * fractional divider must set that denominator is 20 times larger than
175 * numerator to generate precise clock frequency.
177 static void rockchip_fractional_approximation(struct clk_hw
*hw
,
178 unsigned long rate
, unsigned long *parent_rate
,
179 unsigned long *m
, unsigned long *n
)
181 struct clk_fractional_divider
*fd
= to_clk_fd(hw
);
182 unsigned long p_rate
, p_parent_rate
;
183 struct clk_hw
*p_parent
;
186 p_rate
= clk_hw_get_rate(clk_hw_get_parent(hw
));
187 if ((rate
* 20 > p_rate
) && (p_rate
% rate
!= 0)) {
188 p_parent
= clk_hw_get_parent(clk_hw_get_parent(hw
));
189 p_parent_rate
= clk_hw_get_rate(p_parent
);
190 *parent_rate
= p_parent_rate
;
194 * Get rate closer to *parent_rate to guarantee there is no overflow
195 * for m and n. In the result it will be the nearest rate left shifted
196 * by (scale - fd->nwidth) bits.
198 scale
= fls_long(*parent_rate
/ rate
- 1);
199 if (scale
> fd
->nwidth
)
200 rate
<<= scale
- fd
->nwidth
;
202 rational_best_approximation(rate
, *parent_rate
,
203 GENMASK(fd
->mwidth
- 1, 0), GENMASK(fd
->nwidth
- 1, 0),
207 static struct clk
*rockchip_clk_register_frac_branch(
208 struct rockchip_clk_provider
*ctx
, const char *name
,
209 const char *const *parent_names
, u8 num_parents
,
210 void __iomem
*base
, int muxdiv_offset
, u8 div_flags
,
211 int gate_offset
, u8 gate_shift
, u8 gate_flags
,
212 unsigned long flags
, struct rockchip_clk_branch
*child
,
216 struct rockchip_clk_frac
*frac
;
217 struct clk_gate
*gate
= NULL
;
218 struct clk_fractional_divider
*div
= NULL
;
219 const struct clk_ops
*div_ops
= NULL
, *gate_ops
= NULL
;
221 if (muxdiv_offset
< 0)
222 return ERR_PTR(-EINVAL
);
224 if (child
&& child
->branch_type
!= branch_mux
) {
225 pr_err("%s: fractional child clock for %s can only be a mux\n",
227 return ERR_PTR(-EINVAL
);
230 frac
= kzalloc(sizeof(*frac
), GFP_KERNEL
);
232 return ERR_PTR(-ENOMEM
);
234 if (gate_offset
>= 0) {
236 gate
->flags
= gate_flags
;
237 gate
->reg
= base
+ gate_offset
;
238 gate
->bit_idx
= gate_shift
;
240 gate_ops
= &clk_gate_ops
;
244 div
->flags
= div_flags
;
245 div
->reg
= base
+ muxdiv_offset
;
248 div
->mmask
= GENMASK(div
->mwidth
- 1, 0) << div
->mshift
;
251 div
->nmask
= GENMASK(div
->nwidth
- 1, 0) << div
->nshift
;
253 div
->approximation
= rockchip_fractional_approximation
;
254 div_ops
= &clk_fractional_divider_ops
;
256 hw
= clk_hw_register_composite(NULL
, name
, parent_names
, num_parents
,
259 gate
? &gate
->hw
: NULL
, gate_ops
,
260 flags
| CLK_SET_RATE_UNGATE
);
267 struct clk_mux
*frac_mux
= &frac
->mux
;
268 struct clk_init_data init
;
272 frac
->mux_frac_idx
= match_string(child
->parent_names
,
273 child
->num_parents
, name
);
274 frac
->mux_ops
= &clk_mux_ops
;
275 frac
->clk_nb
.notifier_call
= rockchip_clk_frac_notifier_cb
;
277 frac_mux
->reg
= base
+ child
->muxdiv_offset
;
278 frac_mux
->shift
= child
->mux_shift
;
279 frac_mux
->mask
= BIT(child
->mux_width
) - 1;
280 frac_mux
->flags
= child
->mux_flags
;
281 frac_mux
->lock
= lock
;
282 frac_mux
->hw
.init
= &init
;
284 init
.name
= child
->name
;
285 init
.flags
= child
->flags
| CLK_SET_RATE_PARENT
;
286 init
.ops
= frac
->mux_ops
;
287 init
.parent_names
= child
->parent_names
;
288 init
.num_parents
= child
->num_parents
;
290 mux_clk
= clk_register(NULL
, &frac_mux
->hw
);
291 if (IS_ERR(mux_clk
)) {
296 rockchip_clk_add_lookup(ctx
, mux_clk
, child
->id
);
298 /* notifier on the fraction divider to catch rate changes */
299 if (frac
->mux_frac_idx
>= 0) {
300 pr_debug("%s: found fractional parent in mux at pos %d\n",
301 __func__
, frac
->mux_frac_idx
);
302 ret
= clk_notifier_register(hw
->clk
, &frac
->clk_nb
);
304 pr_err("%s: failed to register clock notifier for %s\n",
307 pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
308 __func__
, name
, child
->name
);
315 static struct clk
*rockchip_clk_register_factor_branch(const char *name
,
316 const char *const *parent_names
, u8 num_parents
,
317 void __iomem
*base
, unsigned int mult
, unsigned int div
,
318 int gate_offset
, u8 gate_shift
, u8 gate_flags
,
319 unsigned long flags
, spinlock_t
*lock
)
322 struct clk_gate
*gate
= NULL
;
323 struct clk_fixed_factor
*fix
= NULL
;
325 /* without gate, register a simple factor clock */
326 if (gate_offset
== 0) {
327 return clk_register_fixed_factor(NULL
, name
,
328 parent_names
[0], flags
, mult
,
332 gate
= kzalloc(sizeof(*gate
), GFP_KERNEL
);
334 return ERR_PTR(-ENOMEM
);
336 gate
->flags
= gate_flags
;
337 gate
->reg
= base
+ gate_offset
;
338 gate
->bit_idx
= gate_shift
;
341 fix
= kzalloc(sizeof(*fix
), GFP_KERNEL
);
344 return ERR_PTR(-ENOMEM
);
350 hw
= clk_hw_register_composite(NULL
, name
, parent_names
, num_parents
,
352 &fix
->hw
, &clk_fixed_factor_ops
,
353 &gate
->hw
, &clk_gate_ops
, flags
);
363 struct rockchip_clk_provider
*rockchip_clk_init(struct device_node
*np
,
365 unsigned long nr_clks
)
367 struct rockchip_clk_provider
*ctx
;
368 struct clk
**clk_table
;
371 ctx
= kzalloc(sizeof(struct rockchip_clk_provider
), GFP_KERNEL
);
373 return ERR_PTR(-ENOMEM
);
375 clk_table
= kcalloc(nr_clks
, sizeof(struct clk
*), GFP_KERNEL
);
379 for (i
= 0; i
< nr_clks
; ++i
)
380 clk_table
[i
] = ERR_PTR(-ENOENT
);
382 ctx
->reg_base
= base
;
383 ctx
->clk_data
.clks
= clk_table
;
384 ctx
->clk_data
.clk_num
= nr_clks
;
386 spin_lock_init(&ctx
->lock
);
388 ctx
->grf
= syscon_regmap_lookup_by_phandle(ctx
->cru_node
,
395 return ERR_PTR(-ENOMEM
);
397 EXPORT_SYMBOL_GPL(rockchip_clk_init
);
399 void rockchip_clk_of_add_provider(struct device_node
*np
,
400 struct rockchip_clk_provider
*ctx
)
402 if (of_clk_add_provider(np
, of_clk_src_onecell_get
,
404 pr_err("%s: could not register clk provider\n", __func__
);
406 EXPORT_SYMBOL_GPL(rockchip_clk_of_add_provider
);
408 void rockchip_clk_add_lookup(struct rockchip_clk_provider
*ctx
,
409 struct clk
*clk
, unsigned int id
)
411 if (ctx
->clk_data
.clks
&& id
)
412 ctx
->clk_data
.clks
[id
] = clk
;
414 EXPORT_SYMBOL_GPL(rockchip_clk_add_lookup
);
416 void rockchip_clk_register_plls(struct rockchip_clk_provider
*ctx
,
417 struct rockchip_pll_clock
*list
,
418 unsigned int nr_pll
, int grf_lock_offset
)
423 for (idx
= 0; idx
< nr_pll
; idx
++, list
++) {
424 clk
= rockchip_clk_register_pll(ctx
, list
->type
, list
->name
,
425 list
->parent_names
, list
->num_parents
,
426 list
->con_offset
, grf_lock_offset
,
427 list
->lock_shift
, list
->mode_offset
,
428 list
->mode_shift
, list
->rate_table
,
429 list
->flags
, list
->pll_flags
);
431 pr_err("%s: failed to register clock %s\n", __func__
,
436 rockchip_clk_add_lookup(ctx
, clk
, list
->id
);
439 EXPORT_SYMBOL_GPL(rockchip_clk_register_plls
);
441 void rockchip_clk_register_branches(struct rockchip_clk_provider
*ctx
,
442 struct rockchip_clk_branch
*list
,
445 struct clk
*clk
= NULL
;
449 for (idx
= 0; idx
< nr_clk
; idx
++, list
++) {
452 /* catch simple muxes */
453 switch (list
->branch_type
) {
455 clk
= clk_register_mux(NULL
, list
->name
,
456 list
->parent_names
, list
->num_parents
,
457 flags
, ctx
->reg_base
+ list
->muxdiv_offset
,
458 list
->mux_shift
, list
->mux_width
,
459 list
->mux_flags
, &ctx
->lock
);
462 clk
= rockchip_clk_register_muxgrf(list
->name
,
463 list
->parent_names
, list
->num_parents
,
464 flags
, ctx
->grf
, list
->muxdiv_offset
,
465 list
->mux_shift
, list
->mux_width
,
470 clk
= clk_register_divider_table(NULL
,
471 list
->name
, list
->parent_names
[0],
473 ctx
->reg_base
+ list
->muxdiv_offset
,
474 list
->div_shift
, list
->div_width
,
475 list
->div_flags
, list
->div_table
,
478 clk
= clk_register_divider(NULL
, list
->name
,
479 list
->parent_names
[0], flags
,
480 ctx
->reg_base
+ list
->muxdiv_offset
,
481 list
->div_shift
, list
->div_width
,
482 list
->div_flags
, &ctx
->lock
);
484 case branch_fraction_divider
:
485 clk
= rockchip_clk_register_frac_branch(ctx
, list
->name
,
486 list
->parent_names
, list
->num_parents
,
487 ctx
->reg_base
, list
->muxdiv_offset
,
489 list
->gate_offset
, list
->gate_shift
,
490 list
->gate_flags
, flags
, list
->child
,
493 case branch_half_divider
:
494 clk
= rockchip_clk_register_halfdiv(list
->name
,
495 list
->parent_names
, list
->num_parents
,
496 ctx
->reg_base
, list
->muxdiv_offset
,
497 list
->mux_shift
, list
->mux_width
,
498 list
->mux_flags
, list
->div_shift
,
499 list
->div_width
, list
->div_flags
,
500 list
->gate_offset
, list
->gate_shift
,
501 list
->gate_flags
, flags
, &ctx
->lock
);
504 flags
|= CLK_SET_RATE_PARENT
;
506 clk
= clk_register_gate(NULL
, list
->name
,
507 list
->parent_names
[0], flags
,
508 ctx
->reg_base
+ list
->gate_offset
,
509 list
->gate_shift
, list
->gate_flags
, &ctx
->lock
);
511 case branch_composite
:
512 clk
= rockchip_clk_register_branch(list
->name
,
513 list
->parent_names
, list
->num_parents
,
514 ctx
->reg_base
, list
->muxdiv_offset
,
516 list
->mux_width
, list
->mux_flags
,
517 list
->div_offset
, list
->div_shift
, list
->div_width
,
518 list
->div_flags
, list
->div_table
,
519 list
->gate_offset
, list
->gate_shift
,
520 list
->gate_flags
, flags
, &ctx
->lock
);
523 clk
= rockchip_clk_register_mmc(
525 list
->parent_names
, list
->num_parents
,
526 ctx
->reg_base
+ list
->muxdiv_offset
,
530 case branch_inverter
:
531 clk
= rockchip_clk_register_inverter(
532 list
->name
, list
->parent_names
,
534 ctx
->reg_base
+ list
->muxdiv_offset
,
535 list
->div_shift
, list
->div_flags
, &ctx
->lock
);
538 clk
= rockchip_clk_register_factor_branch(
539 list
->name
, list
->parent_names
,
540 list
->num_parents
, ctx
->reg_base
,
541 list
->div_shift
, list
->div_width
,
542 list
->gate_offset
, list
->gate_shift
,
543 list
->gate_flags
, flags
, &ctx
->lock
);
546 clk
= rockchip_clk_register_ddrclk(
547 list
->name
, list
->flags
,
548 list
->parent_names
, list
->num_parents
,
549 list
->muxdiv_offset
, list
->mux_shift
,
550 list
->mux_width
, list
->div_shift
,
551 list
->div_width
, list
->div_flags
,
552 ctx
->reg_base
, &ctx
->lock
);
556 /* none of the cases above matched */
558 pr_err("%s: unknown clock type %d\n",
559 __func__
, list
->branch_type
);
564 pr_err("%s: failed to register clock %s: %ld\n",
565 __func__
, list
->name
, PTR_ERR(clk
));
569 rockchip_clk_add_lookup(ctx
, clk
, list
->id
);
572 EXPORT_SYMBOL_GPL(rockchip_clk_register_branches
);
574 void rockchip_clk_register_armclk(struct rockchip_clk_provider
*ctx
,
575 unsigned int lookup_id
,
576 const char *name
, const char *const *parent_names
,
578 const struct rockchip_cpuclk_reg_data
*reg_data
,
579 const struct rockchip_cpuclk_rate_table
*rates
,
584 clk
= rockchip_clk_register_cpuclk(name
, parent_names
, num_parents
,
585 reg_data
, rates
, nrates
,
586 ctx
->reg_base
, &ctx
->lock
);
588 pr_err("%s: failed to register clock %s: %ld\n",
589 __func__
, name
, PTR_ERR(clk
));
593 rockchip_clk_add_lookup(ctx
, clk
, lookup_id
);
595 EXPORT_SYMBOL_GPL(rockchip_clk_register_armclk
);
/*
 * Permanently enable the named clocks so the framework never gates them.
 * A clock name that cannot be looked up is reported instead of being
 * silently skipped (a typo here would otherwise go unnoticed).
 */
void rockchip_clk_protect_critical(const char *const clocks[],
				   int nclocks)
{
	int i;

	/* Protect the clocks that needs to stay on */
	for (i = 0; i < nclocks; i++) {
		struct clk *clk = __clk_lookup(clocks[i]);

		if (!clk) {
			pr_warn("%s: could not lookup clock %s\n",
				__func__, clocks[i]);
			continue;
		}

		clk_prepare_enable(clk);
	}
}
EXPORT_SYMBOL_GPL(rockchip_clk_protect_critical);
611 static void __iomem
*rst_base
;
612 static unsigned int reg_restart
;
613 static void (*cb_restart
)(void);
614 static int rockchip_restart_notify(struct notifier_block
*this,
615 unsigned long mode
, void *cmd
)
620 writel(0xfdb9, rst_base
+ reg_restart
);
624 static struct notifier_block rockchip_restart_handler
= {
625 .notifier_call
= rockchip_restart_notify
,
630 rockchip_register_restart_notifier(struct rockchip_clk_provider
*ctx
,
636 rst_base
= ctx
->reg_base
;
639 ret
= register_restart_handler(&rockchip_restart_handler
);
641 pr_err("%s: cannot register restart handler, %d\n",
644 EXPORT_SYMBOL_GPL(rockchip_register_restart_notifier
);