Linux 4.16.11
[linux/fpc-iii.git] / drivers / clk / rockchip / clk.c
blob35dbd63c2f49b03b87866b870b0700aebf333867
1 /*
2 * Copyright (c) 2014 MundoReader S.L.
3 * Author: Heiko Stuebner <heiko@sntech.de>
5 * Copyright (c) 2016 Rockchip Electronics Co. Ltd.
6 * Author: Xing Zheng <zhengxing@rock-chips.com>
8 * based on
10 * samsung/clk.c
11 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
12 * Copyright (c) 2013 Linaro Ltd.
13 * Author: Thomas Abraham <thomas.ab@samsung.com>
15 * This program is free software; you can redistribute it and/or modify
16 * it under the terms of the GNU General Public License as published by
17 * the Free Software Foundation; either version 2 of the License, or
18 * (at your option) any later version.
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
26 #include <linux/slab.h>
27 #include <linux/clk.h>
28 #include <linux/clk-provider.h>
29 #include <linux/mfd/syscon.h>
30 #include <linux/regmap.h>
31 #include <linux/reboot.h>
32 #include <linux/rational.h>
33 #include "clk.h"
35 /**
36 * Register a clock branch.
37 * Most clock branches have a form like
39 * src1 --|--\
40 * |M |--[GATE]-[DIV]-
41 * src2 --|--/
43 * sometimes without one of those components.
45 static struct clk *rockchip_clk_register_branch(const char *name,
46 const char *const *parent_names, u8 num_parents,
47 void __iomem *base,
48 int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
49 u8 div_shift, u8 div_width, u8 div_flags,
50 struct clk_div_table *div_table, int gate_offset,
51 u8 gate_shift, u8 gate_flags, unsigned long flags,
52 spinlock_t *lock)
54 struct clk *clk;
55 struct clk_mux *mux = NULL;
56 struct clk_gate *gate = NULL;
57 struct clk_divider *div = NULL;
58 const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
59 *gate_ops = NULL;
61 if (num_parents > 1) {
62 mux = kzalloc(sizeof(*mux), GFP_KERNEL);
63 if (!mux)
64 return ERR_PTR(-ENOMEM);
66 mux->reg = base + muxdiv_offset;
67 mux->shift = mux_shift;
68 mux->mask = BIT(mux_width) - 1;
69 mux->flags = mux_flags;
70 mux->lock = lock;
71 mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
72 : &clk_mux_ops;
75 if (gate_offset >= 0) {
76 gate = kzalloc(sizeof(*gate), GFP_KERNEL);
77 if (!gate)
78 goto err_gate;
80 gate->flags = gate_flags;
81 gate->reg = base + gate_offset;
82 gate->bit_idx = gate_shift;
83 gate->lock = lock;
84 gate_ops = &clk_gate_ops;
87 if (div_width > 0) {
88 div = kzalloc(sizeof(*div), GFP_KERNEL);
89 if (!div)
90 goto err_div;
92 div->flags = div_flags;
93 div->reg = base + muxdiv_offset;
94 div->shift = div_shift;
95 div->width = div_width;
96 div->lock = lock;
97 div->table = div_table;
98 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY)
99 ? &clk_divider_ro_ops
100 : &clk_divider_ops;
103 clk = clk_register_composite(NULL, name, parent_names, num_parents,
104 mux ? &mux->hw : NULL, mux_ops,
105 div ? &div->hw : NULL, div_ops,
106 gate ? &gate->hw : NULL, gate_ops,
107 flags);
109 return clk;
110 err_div:
111 kfree(gate);
112 err_gate:
113 kfree(mux);
114 return ERR_PTR(-ENOMEM);
/*
 * A fractional-divider branch plus its bookkeeping.  The embedded gate and
 * (optional) child mux share this one allocation, whose lifetime is tied to
 * the composite clock registered in rockchip_clk_register_frac_branch().
 */
struct rockchip_clk_frac {
	/* notifier hooked onto the fractional clock to catch rate changes */
	struct notifier_block		clk_nb;
	struct clk_fractional_divider	div;
	struct clk_gate			gate;

	/* optional child mux fed by this fractional divider */
	struct clk_mux			mux;
	const struct clk_ops		*mux_ops;
	/* parent index of the frac divider inside the child mux, -1 if none */
	int				mux_frac_idx;

	/* true while PRE_RATE_CHANGE has temporarily re-muxed the child */
	bool				rate_change_remuxed;
	/* parent index to restore on POST_RATE_CHANGE */
	int				rate_change_idx;
};

#define to_rockchip_clk_frac_nb(nb) \
			container_of(nb, struct rockchip_clk_frac, clk_nb)
/*
 * Rate-change notifier for a fractional divider with a child mux.
 *
 * Before the rate changes, force the child mux onto the fractional divider
 * (so the new rate is actually consumed); after the change, restore the
 * parent the mux had originally selected.
 */
static int rockchip_clk_frac_notifier_cb(struct notifier_block *nb,
					 unsigned long event, void *data)
{
	struct clk_notifier_data *ndata = data;
	struct rockchip_clk_frac *frac = to_rockchip_clk_frac_nb(nb);
	struct clk_mux *frac_mux = &frac->mux;
	int ret = 0;

	pr_debug("%s: event %lu, old_rate %lu, new_rate: %lu\n",
		 __func__, event, ndata->old_rate, ndata->new_rate);
	if (event == PRE_RATE_CHANGE) {
		/* remember the currently selected parent so we can restore it */
		frac->rate_change_idx =
				frac->mux_ops->get_parent(&frac_mux->hw);
		if (frac->rate_change_idx != frac->mux_frac_idx) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->mux_frac_idx);
			frac->rate_change_remuxed = 1;
		}
	} else if (event == POST_RATE_CHANGE) {
		/*
		 * The POST_RATE_CHANGE notifier runs directly after the
		 * divider clock is set in clk_change_rate, so we'll have
		 * remuxed back to the original parent before clk_change_rate
		 * reaches the mux itself.
		 */
		if (frac->rate_change_remuxed) {
			frac->mux_ops->set_parent(&frac_mux->hw,
						  frac->rate_change_idx);
			frac->rate_change_remuxed = 0;
		}
	}

	return notifier_from_errno(ret);
}
/*
 * fractional divider must set that denominator is 20 times larger than
 * numerator to generate precise clock frequency.
 */
static void rockchip_fractional_approximation(struct clk_hw *hw,
		unsigned long rate, unsigned long *parent_rate,
		unsigned long *m, unsigned long *n)
{
	struct clk_fractional_divider *fd = to_clk_fd(hw);
	unsigned long p_rate, p_parent_rate;
	struct clk_hw *p_parent;
	unsigned long scale;

	/*
	 * If the requested rate is within a factor of 20 of the parent and
	 * does not divide it evenly, report the grandparent's rate instead so
	 * the caller can re-rate the parent for a more precise result.
	 */
	p_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
	if ((rate * 20 > p_rate) && (p_rate % rate != 0)) {
		p_parent = clk_hw_get_parent(clk_hw_get_parent(hw));
		p_parent_rate = clk_hw_get_rate(p_parent);
		*parent_rate = p_parent_rate;
	}

	/*
	 * Get rate closer to *parent_rate to guarantee there is no overflow
	 * for m and n. In the result it will be the nearest rate left shifted
	 * by (scale - fd->nwidth) bits.
	 */
	scale = fls_long(*parent_rate / rate - 1);
	if (scale > fd->nwidth)
		rate <<= scale - fd->nwidth;

	/* clamp numerator/denominator to the register field widths */
	rational_best_approximation(rate, *parent_rate,
			GENMASK(fd->mwidth - 1, 0), GENMASK(fd->nwidth - 1, 0),
			m, n);
}
/*
 * Register a fractional-divider branch (16-bit numerator in bits 31:16,
 * 16-bit denominator in bits 15:0 of the register at muxdiv_offset), with an
 * optional gate and an optional child mux described by @child.  When the
 * child mux lists this clock as a parent, a rate-change notifier keeps the
 * mux pointed at the fractional divider across rate changes.
 */
static struct clk *rockchip_clk_register_frac_branch(
		struct rockchip_clk_provider *ctx, const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, int muxdiv_offset, u8 div_flags,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, struct rockchip_clk_branch *child,
		spinlock_t *lock)
{
	struct rockchip_clk_frac *frac;
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fractional_divider *div = NULL;
	const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

	/* the divider register is mandatory for this branch type */
	if (muxdiv_offset < 0)
		return ERR_PTR(-EINVAL);

	if (child && child->branch_type != branch_mux) {
		pr_err("%s: fractional child clock for %s can only be a mux\n",
		       __func__, name);
		return ERR_PTR(-EINVAL);
	}

	frac = kzalloc(sizeof(*frac), GFP_KERNEL);
	if (!frac)
		return ERR_PTR(-ENOMEM);

	/* negative gate_offset means this branch is ungated */
	if (gate_offset >= 0) {
		gate = &frac->gate;
		gate->flags = gate_flags;
		gate->reg = base + gate_offset;
		gate->bit_idx = gate_shift;
		gate->lock = lock;
		gate_ops = &clk_gate_ops;
	}

	/* fixed 16/16 bit split of the numerator/denominator fields */
	div = &frac->div;
	div->flags = div_flags;
	div->reg = base + muxdiv_offset;
	div->mshift = 16;
	div->mwidth = 16;
	div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
	div->nshift = 0;
	div->nwidth = 16;
	div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
	div->lock = lock;
	div->approximation = rockchip_fractional_approximation;
	div_ops = &clk_fractional_divider_ops;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &div->hw, div_ops,
				     gate ? &gate->hw : NULL, gate_ops,
				     flags | CLK_SET_RATE_UNGATE);
	if (IS_ERR(clk)) {
		kfree(frac);
		return clk;
	}

	if (child) {
		struct clk_mux *frac_mux = &frac->mux;
		struct clk_init_data init;
		struct clk *mux_clk;
		int i, ret;

		/* locate this fractional clock among the child mux's parents */
		frac->mux_frac_idx = -1;
		for (i = 0; i < child->num_parents; i++) {
			if (!strcmp(name, child->parent_names[i])) {
				pr_debug("%s: found fractional parent in mux at pos %d\n",
					 __func__, i);
				frac->mux_frac_idx = i;
				break;
			}
		}

		frac->mux_ops = &clk_mux_ops;
		frac->clk_nb.notifier_call = rockchip_clk_frac_notifier_cb;

		frac_mux->reg = base + child->muxdiv_offset;
		frac_mux->shift = child->mux_shift;
		frac_mux->mask = BIT(child->mux_width) - 1;
		frac_mux->flags = child->mux_flags;
		frac_mux->lock = lock;
		frac_mux->hw.init = &init;

		init.name = child->name;
		init.flags = child->flags | CLK_SET_RATE_PARENT;
		init.ops = frac->mux_ops;
		init.parent_names = child->parent_names;
		init.num_parents = child->num_parents;

		/*
		 * NOTE(review): on mux registration failure the frac clock is
		 * still returned and the embedded mux is simply unused — the
		 * child id just stays ERR_PTR(-ENOENT) in the lookup table.
		 */
		mux_clk = clk_register(NULL, &frac_mux->hw);
		if (IS_ERR(mux_clk))
			return clk;

		rockchip_clk_add_lookup(ctx, mux_clk, child->id);

		/* notifier on the fraction divider to catch rate changes */
		if (frac->mux_frac_idx >= 0) {
			ret = clk_notifier_register(clk, &frac->clk_nb);
			if (ret)
				pr_err("%s: failed to register clock notifier for %s\n",
				       __func__, name);
		} else {
			pr_warn("%s: could not find %s as parent of %s, rate changes may not work\n",
				__func__, name, child->name);
		}
	}

	return clk;
}
/*
 * Register a fixed-factor branch (rate = parent * mult / div), optionally
 * combined with a gate.  Note the sentinel differs from the other helpers:
 * here gate_offset == 0 (not < 0) means "no gate".
 */
static struct clk *rockchip_clk_register_factor_branch(const char *name,
		const char *const *parent_names, u8 num_parents,
		void __iomem *base, unsigned int mult, unsigned int div,
		int gate_offset, u8 gate_shift, u8 gate_flags,
		unsigned long flags, spinlock_t *lock)
{
	struct clk *clk;
	struct clk_gate *gate = NULL;
	struct clk_fixed_factor *fix = NULL;

	/* without gate, register a simple factor clock */
	if (gate_offset == 0) {
		return clk_register_fixed_factor(NULL, name,
				parent_names[0], flags, mult,
				div);
	}

	gate = kzalloc(sizeof(*gate), GFP_KERNEL);
	if (!gate)
		return ERR_PTR(-ENOMEM);

	gate->flags = gate_flags;
	gate->reg = base + gate_offset;
	gate->bit_idx = gate_shift;
	gate->lock = lock;

	fix = kzalloc(sizeof(*fix), GFP_KERNEL);
	if (!fix) {
		kfree(gate);
		return ERR_PTR(-ENOMEM);
	}

	fix->mult = mult;
	fix->div = div;

	clk = clk_register_composite(NULL, name, parent_names, num_parents,
				     NULL, NULL,
				     &fix->hw, &clk_fixed_factor_ops,
				     &gate->hw, &clk_gate_ops, flags);
	if (IS_ERR(clk)) {
		/* composite failed: nothing took ownership, free both parts */
		kfree(fix);
		kfree(gate);
	}

	return clk;
}
361 struct rockchip_clk_provider * __init rockchip_clk_init(struct device_node *np,
362 void __iomem *base, unsigned long nr_clks)
364 struct rockchip_clk_provider *ctx;
365 struct clk **clk_table;
366 int i;
368 ctx = kzalloc(sizeof(struct rockchip_clk_provider), GFP_KERNEL);
369 if (!ctx)
370 return ERR_PTR(-ENOMEM);
372 clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
373 if (!clk_table)
374 goto err_free;
376 for (i = 0; i < nr_clks; ++i)
377 clk_table[i] = ERR_PTR(-ENOENT);
379 ctx->reg_base = base;
380 ctx->clk_data.clks = clk_table;
381 ctx->clk_data.clk_num = nr_clks;
382 ctx->cru_node = np;
383 spin_lock_init(&ctx->lock);
385 ctx->grf = syscon_regmap_lookup_by_phandle(ctx->cru_node,
386 "rockchip,grf");
388 return ctx;
390 err_free:
391 kfree(ctx);
392 return ERR_PTR(-ENOMEM);
395 void __init rockchip_clk_of_add_provider(struct device_node *np,
396 struct rockchip_clk_provider *ctx)
398 if (of_clk_add_provider(np, of_clk_src_onecell_get,
399 &ctx->clk_data))
400 pr_err("%s: could not register clk provider\n", __func__);
403 void rockchip_clk_add_lookup(struct rockchip_clk_provider *ctx,
404 struct clk *clk, unsigned int id)
406 if (ctx->clk_data.clks && id)
407 ctx->clk_data.clks[id] = clk;
410 void __init rockchip_clk_register_plls(struct rockchip_clk_provider *ctx,
411 struct rockchip_pll_clock *list,
412 unsigned int nr_pll, int grf_lock_offset)
414 struct clk *clk;
415 int idx;
417 for (idx = 0; idx < nr_pll; idx++, list++) {
418 clk = rockchip_clk_register_pll(ctx, list->type, list->name,
419 list->parent_names, list->num_parents,
420 list->con_offset, grf_lock_offset,
421 list->lock_shift, list->mode_offset,
422 list->mode_shift, list->rate_table,
423 list->flags, list->pll_flags);
424 if (IS_ERR(clk)) {
425 pr_err("%s: failed to register clock %s\n", __func__,
426 list->name);
427 continue;
430 rockchip_clk_add_lookup(ctx, clk, list->id);
/*
 * Register an array of clock branches, dispatching each entry to the helper
 * matching its branch_type.  Failures are logged and the entry skipped, so a
 * single bad branch does not abort the whole table.
 */
void __init rockchip_clk_register_branches(
			struct rockchip_clk_provider *ctx,
			struct rockchip_clk_branch *list,
			unsigned int nr_clk)
{
	struct clk *clk = NULL;
	unsigned int idx;
	unsigned long flags;

	for (idx = 0; idx < nr_clk; idx++, list++) {
		flags = list->flags;

		/* catch simple muxes */
		switch (list->branch_type) {
		case branch_mux:
			clk = clk_register_mux(NULL, list->name,
				list->parent_names, list->num_parents,
				flags, ctx->reg_base + list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags, &ctx->lock);
			break;
		case branch_muxgrf:
			/* mux whose select field lives in the GRF syscon */
			clk = rockchip_clk_register_muxgrf(list->name,
				list->parent_names, list->num_parents,
				flags, ctx->grf, list->muxdiv_offset,
				list->mux_shift, list->mux_width,
				list->mux_flags);
			break;
		case branch_divider:
			if (list->div_table)
				clk = clk_register_divider_table(NULL,
					list->name, list->parent_names[0],
					flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, list->div_table,
					&ctx->lock);
			else
				clk = clk_register_divider(NULL, list->name,
					list->parent_names[0], flags,
					ctx->reg_base + list->muxdiv_offset,
					list->div_shift, list->div_width,
					list->div_flags, &ctx->lock);
			break;
		case branch_fraction_divider:
			clk = rockchip_clk_register_frac_branch(ctx, list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->div_flags,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, list->child,
				&ctx->lock);
			break;
		case branch_gate:
			/* plain gates always propagate rate requests upward */
			flags |= CLK_SET_RATE_PARENT;

			clk = clk_register_gate(NULL, list->name,
				list->parent_names[0], flags,
				ctx->reg_base + list->gate_offset,
				list->gate_shift, list->gate_flags, &ctx->lock);
			break;
		case branch_composite:
			clk = rockchip_clk_register_branch(list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base, list->muxdiv_offset,
				list->mux_shift,
				list->mux_width, list->mux_flags,
				list->div_shift, list->div_width,
				list->div_flags, list->div_table,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_mmc:
			clk = rockchip_clk_register_mmc(
				list->name,
				list->parent_names, list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift);
			break;
		case branch_inverter:
			clk = rockchip_clk_register_inverter(
				list->name, list->parent_names,
				list->num_parents,
				ctx->reg_base + list->muxdiv_offset,
				list->div_shift, list->div_flags, &ctx->lock);
			break;
		case branch_factor:
			clk = rockchip_clk_register_factor_branch(
				list->name, list->parent_names,
				list->num_parents, ctx->reg_base,
				list->div_shift, list->div_width,
				list->gate_offset, list->gate_shift,
				list->gate_flags, flags, &ctx->lock);
			break;
		case branch_ddrclk:
			clk = rockchip_clk_register_ddrclk(
				list->name, list->flags,
				list->parent_names, list->num_parents,
				list->muxdiv_offset, list->mux_shift,
				list->mux_width, list->div_shift,
				list->div_width, list->div_flags,
				ctx->reg_base, &ctx->lock);
			break;
		}

		/* none of the cases above matched */
		if (!clk) {
			pr_err("%s: unknown clock type %d\n",
			       __func__, list->branch_type);
			continue;
		}

		if (IS_ERR(clk)) {
			pr_err("%s: failed to register clock %s: %ld\n",
			       __func__, list->name, PTR_ERR(clk));
			continue;
		}

		rockchip_clk_add_lookup(ctx, clk, list->id);
	}
}
557 void __init rockchip_clk_register_armclk(struct rockchip_clk_provider *ctx,
558 unsigned int lookup_id,
559 const char *name, const char *const *parent_names,
560 u8 num_parents,
561 const struct rockchip_cpuclk_reg_data *reg_data,
562 const struct rockchip_cpuclk_rate_table *rates,
563 int nrates)
565 struct clk *clk;
567 clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
568 reg_data, rates, nrates,
569 ctx->reg_base, &ctx->lock);
570 if (IS_ERR(clk)) {
571 pr_err("%s: failed to register clock %s: %ld\n",
572 __func__, name, PTR_ERR(clk));
573 return;
576 rockchip_clk_add_lookup(ctx, clk, lookup_id);
579 void __init rockchip_clk_protect_critical(const char *const clocks[],
580 int nclocks)
582 int i;
584 /* Protect the clocks that needs to stay on */
585 for (i = 0; i < nclocks; i++) {
586 struct clk *clk = __clk_lookup(clocks[i]);
588 if (clk)
589 clk_prepare_enable(clk);
/* State shared with the restart notifier below, filled in by
 * rockchip_register_restart_notifier(). */
static void __iomem *rst_base;		/* CRU register base */
static unsigned int reg_restart;	/* offset of the global-reset register */
static void (*cb_restart)(void);	/* optional SoC hook run before reset */

static int rockchip_restart_notify(struct notifier_block *this,
				   unsigned long mode, void *cmd)
{
	if (cb_restart)
		cb_restart();

	/*
	 * 0xfdb9 triggers the CRU software reset — presumably the register's
	 * write key per the SoC TRM; TODO confirm against the datasheet.
	 */
	writel(0xfdb9, rst_base + reg_restart);
	return NOTIFY_DONE;
}

/* priority 128: preferred over the default (lower-priority) handlers */
static struct notifier_block rockchip_restart_handler = {
	.notifier_call = rockchip_restart_notify,
	.priority = 128,
};
611 void __init
612 rockchip_register_restart_notifier(struct rockchip_clk_provider *ctx,
613 unsigned int reg,
614 void (*cb)(void))
616 int ret;
618 rst_base = ctx->reg_base;
619 reg_restart = reg;
620 cb_restart = cb;
621 ret = register_restart_handler(&rockchip_restart_handler);
622 if (ret)
623 pr_err("%s: cannot register restart handler, %d\n",
624 __func__, ret);