Merge tag 'trace-v5.11-rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/rostedt...
[linux/fpc-iii.git] / drivers / clk / baikal-t1 / ccu-div.c
blob4062092d67f905a601e1d8d0856175eaeee0240c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020 BAIKAL ELECTRONICS, JSC
5 * Authors:
6 * Serge Semin <Sergey.Semin@baikalelectronics.ru>
7 * Dmitry Dunaev <dmitry.dunaev@baikalelectronics.ru>
9 * Baikal-T1 CCU Dividers interface driver
12 #define pr_fmt(fmt) "bt1-ccu-div: " fmt
14 #include <linux/kernel.h>
15 #include <linux/printk.h>
16 #include <linux/bits.h>
17 #include <linux/bitfield.h>
18 #include <linux/slab.h>
19 #include <linux/clk-provider.h>
20 #include <linux/of.h>
21 #include <linux/spinlock.h>
22 #include <linux/regmap.h>
23 #include <linux/delay.h>
24 #include <linux/time64.h>
25 #include <linux/debugfs.h>
27 #include "ccu-div.h"
/* Divider Control register and its fields. */
#define CCU_DIV_CTL			0x00
#define CCU_DIV_CTL_EN			BIT(0)	/* Divider clock gate */
#define CCU_DIV_CTL_RST			BIT(1)	/* Domain reset trigger */
#define CCU_DIV_CTL_SET_CLKDIV		BIT(2)	/* Latch a new divider value */
#define CCU_DIV_CTL_CLKDIV_FLD		4	/* CLKDIV field offset, bits */
#define CCU_DIV_CTL_CLKDIV_MASK(_width) \
	GENMASK((_width) + CCU_DIV_CTL_CLKDIV_FLD - 1, CCU_DIV_CTL_CLKDIV_FLD)
/* Divider lock flag lives at bit 27 or bit 31 depending on the block. */
#define CCU_DIV_CTL_LOCK_SHIFTED	BIT(27)
#define CCU_DIV_CTL_LOCK_NORMAL		BIT(31)

/* The delay must be enough to cover all the resets (see ccu_div_reset_domain). */
#define CCU_DIV_RST_DELAY_US		1
#define CCU_DIV_LOCK_CHECK_RETRIES	50

/* Value range representable by the CLKDIV field of a given mask. */
#define CCU_DIV_CLKDIV_MIN		0
#define CCU_DIV_CLKDIV_MAX(_mask) \
	((_mask) >> CCU_DIV_CTL_CLKDIV_FLD)
47 * Use the next two methods until there are generic field setter and
48 * getter available with non-constant mask support.
50 static inline u32 ccu_div_get(u32 mask, u32 val)
52 return (val & mask) >> CCU_DIV_CTL_CLKDIV_FLD;
55 static inline u32 ccu_div_prep(u32 mask, u32 val)
57 return (val << CCU_DIV_CTL_CLKDIV_FLD) & mask;
60 static inline unsigned long ccu_div_lock_delay_ns(unsigned long ref_clk,
61 unsigned long div)
63 u64 ns = 4ULL * (div ?: 1) * NSEC_PER_SEC;
65 do_div(ns, ref_clk);
67 return ns;
/* Output frequency of the divider: a zero divider passes the clock through. */
static inline unsigned long ccu_div_calc_freq(unsigned long ref_clk,
					      unsigned long div)
{
	if (!div)
		return ref_clk;

	return ref_clk / div;
}
76 static int ccu_div_var_update_clkdiv(struct ccu_div *div,
77 unsigned long parent_rate,
78 unsigned long divider)
80 unsigned long nd;
81 u32 val = 0;
82 u32 lock;
83 int count;
85 nd = ccu_div_lock_delay_ns(parent_rate, divider);
87 if (div->features & CCU_DIV_LOCK_SHIFTED)
88 lock = CCU_DIV_CTL_LOCK_SHIFTED;
89 else
90 lock = CCU_DIV_CTL_LOCK_NORMAL;
92 regmap_update_bits(div->sys_regs, div->reg_ctl,
93 CCU_DIV_CTL_SET_CLKDIV, CCU_DIV_CTL_SET_CLKDIV);
96 * Until there is nsec-version of readl_poll_timeout() is available
97 * we have to implement the next polling loop.
99 count = CCU_DIV_LOCK_CHECK_RETRIES;
100 do {
101 ndelay(nd);
102 regmap_read(div->sys_regs, div->reg_ctl, &val);
103 if (val & lock)
104 return 0;
105 } while (--count);
107 return -ETIMEDOUT;
110 static int ccu_div_var_enable(struct clk_hw *hw)
112 struct clk_hw *parent_hw = clk_hw_get_parent(hw);
113 struct ccu_div *div = to_ccu_div(hw);
114 unsigned long flags;
115 u32 val = 0;
116 int ret;
118 if (!parent_hw) {
119 pr_err("Can't enable '%s' with no parent", clk_hw_get_name(hw));
120 return -EINVAL;
123 regmap_read(div->sys_regs, div->reg_ctl, &val);
124 if (val & CCU_DIV_CTL_EN)
125 return 0;
127 spin_lock_irqsave(&div->lock, flags);
128 ret = ccu_div_var_update_clkdiv(div, clk_hw_get_rate(parent_hw),
129 ccu_div_get(div->mask, val));
130 if (!ret)
131 regmap_update_bits(div->sys_regs, div->reg_ctl,
132 CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
133 spin_unlock_irqrestore(&div->lock, flags);
134 if (ret)
135 pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
137 return ret;
140 static int ccu_div_gate_enable(struct clk_hw *hw)
142 struct ccu_div *div = to_ccu_div(hw);
143 unsigned long flags;
145 spin_lock_irqsave(&div->lock, flags);
146 regmap_update_bits(div->sys_regs, div->reg_ctl,
147 CCU_DIV_CTL_EN, CCU_DIV_CTL_EN);
148 spin_unlock_irqrestore(&div->lock, flags);
150 return 0;
153 static void ccu_div_gate_disable(struct clk_hw *hw)
155 struct ccu_div *div = to_ccu_div(hw);
156 unsigned long flags;
158 spin_lock_irqsave(&div->lock, flags);
159 regmap_update_bits(div->sys_regs, div->reg_ctl, CCU_DIV_CTL_EN, 0);
160 spin_unlock_irqrestore(&div->lock, flags);
163 static int ccu_div_gate_is_enabled(struct clk_hw *hw)
165 struct ccu_div *div = to_ccu_div(hw);
166 u32 val = 0;
168 regmap_read(div->sys_regs, div->reg_ctl, &val);
170 return !!(val & CCU_DIV_CTL_EN);
173 static unsigned long ccu_div_var_recalc_rate(struct clk_hw *hw,
174 unsigned long parent_rate)
176 struct ccu_div *div = to_ccu_div(hw);
177 unsigned long divider;
178 u32 val = 0;
180 regmap_read(div->sys_regs, div->reg_ctl, &val);
181 divider = ccu_div_get(div->mask, val);
183 return ccu_div_calc_freq(parent_rate, divider);
186 static inline unsigned long ccu_div_var_calc_divider(unsigned long rate,
187 unsigned long parent_rate,
188 unsigned int mask)
190 unsigned long divider;
192 divider = parent_rate / rate;
193 return clamp_t(unsigned long, divider, CCU_DIV_CLKDIV_MIN,
194 CCU_DIV_CLKDIV_MAX(mask));
197 static long ccu_div_var_round_rate(struct clk_hw *hw, unsigned long rate,
198 unsigned long *parent_rate)
200 struct ccu_div *div = to_ccu_div(hw);
201 unsigned long divider;
203 divider = ccu_div_var_calc_divider(rate, *parent_rate, div->mask);
205 return ccu_div_calc_freq(*parent_rate, divider);
209 * This method is used for the clock divider blocks, which support the
210 * on-the-fly rate change. So due to lacking the EN bit functionality
211 * they can't be gated before the rate adjustment.
213 static int ccu_div_var_set_rate_slow(struct clk_hw *hw, unsigned long rate,
214 unsigned long parent_rate)
216 struct ccu_div *div = to_ccu_div(hw);
217 unsigned long flags, divider;
218 u32 val;
219 int ret;
221 divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
222 if (divider == 1 && div->features & CCU_DIV_SKIP_ONE) {
223 divider = 0;
224 } else if (div->features & CCU_DIV_SKIP_ONE_TO_THREE) {
225 if (divider == 1 || divider == 2)
226 divider = 0;
227 else if (divider == 3)
228 divider = 4;
231 val = ccu_div_prep(div->mask, divider);
233 spin_lock_irqsave(&div->lock, flags);
234 regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, val);
235 ret = ccu_div_var_update_clkdiv(div, parent_rate, divider);
236 spin_unlock_irqrestore(&div->lock, flags);
237 if (ret)
238 pr_err("Divider '%s' lock timed out\n", clk_hw_get_name(hw));
240 return ret;
244 * This method is used for the clock divider blocks, which don't support
245 * the on-the-fly rate change.
247 static int ccu_div_var_set_rate_fast(struct clk_hw *hw, unsigned long rate,
248 unsigned long parent_rate)
250 struct ccu_div *div = to_ccu_div(hw);
251 unsigned long flags, divider;
252 u32 val;
254 divider = ccu_div_var_calc_divider(rate, parent_rate, div->mask);
255 val = ccu_div_prep(div->mask, divider);
258 * Also disable the clock divider block if it was enabled by default
259 * or by the bootloader.
261 spin_lock_irqsave(&div->lock, flags);
262 regmap_update_bits(div->sys_regs, div->reg_ctl,
263 div->mask | CCU_DIV_CTL_EN, val);
264 spin_unlock_irqrestore(&div->lock, flags);
266 return 0;
269 static unsigned long ccu_div_fixed_recalc_rate(struct clk_hw *hw,
270 unsigned long parent_rate)
272 struct ccu_div *div = to_ccu_div(hw);
274 return ccu_div_calc_freq(parent_rate, div->divider);
277 static long ccu_div_fixed_round_rate(struct clk_hw *hw, unsigned long rate,
278 unsigned long *parent_rate)
280 struct ccu_div *div = to_ccu_div(hw);
282 return ccu_div_calc_freq(*parent_rate, div->divider);
/* Nothing to do: the divider value is fixed in hardware. */
static int ccu_div_fixed_set_rate(struct clk_hw *hw, unsigned long rate,
				  unsigned long parent_rate)
{
	return 0;
}
291 int ccu_div_reset_domain(struct ccu_div *div)
293 unsigned long flags;
295 if (!div || !(div->features & CCU_DIV_RESET_DOMAIN))
296 return -EINVAL;
298 spin_lock_irqsave(&div->lock, flags);
299 regmap_update_bits(div->sys_regs, div->reg_ctl,
300 CCU_DIV_CTL_RST, CCU_DIV_CTL_RST);
301 spin_unlock_irqrestore(&div->lock, flags);
303 /* The next delay must be enough to cover all the resets. */
304 udelay(CCU_DIV_RST_DELAY_US);
306 return 0;
#ifdef CONFIG_DEBUG_FS

/* DebugFS node descriptor: a named flag of the divider control register. */
struct ccu_div_dbgfs_bit {
	struct ccu_div *div;
	const char *name;
	u32 mask;
};

#define CCU_DIV_DBGFS_BIT_ATTR(_name, _mask)	\
	{					\
		.name = _name,			\
		.mask = _mask			\
	}

/* Control-register flags exposed over DebugFS (subset chosen per divider). */
static const struct ccu_div_dbgfs_bit ccu_div_bits[] = {
	CCU_DIV_DBGFS_BIT_ATTR("div_en", CCU_DIV_CTL_EN),
	CCU_DIV_DBGFS_BIT_ATTR("div_rst", CCU_DIV_CTL_RST),
	CCU_DIV_DBGFS_BIT_ATTR("div_bypass", CCU_DIV_CTL_SET_CLKDIV),
	CCU_DIV_DBGFS_BIT_ATTR("div_lock", CCU_DIV_CTL_LOCK_NORMAL)
};

#define CCU_DIV_DBGFS_BIT_NUM	ARRAY_SIZE(ccu_div_bits)

/*
 * It can be dangerous to change the Divider settings behind clock framework
 * back, therefore we don't provide any kernel config based compile time option
 * for this feature to enable.
 */
#undef CCU_DIV_ALLOW_WRITE_DEBUGFS
#ifdef CCU_DIV_ALLOW_WRITE_DEBUGFS

/* DebugFS write handler: set or clear a single control-register flag. */
static int ccu_div_dbgfs_bit_set(void *priv, u64 val)
{
	const struct ccu_div_dbgfs_bit *bit = priv;
	struct ccu_div *div = bit->div;
	unsigned long flags;

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl,
			   bit->mask, val ? bit->mask : 0);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

/* DebugFS write handler: set the CLKDIV field, clamped to the valid range. */
static int ccu_div_dbgfs_var_clkdiv_set(void *priv, u64 val)
{
	struct ccu_div *div = priv;
	unsigned long flags;
	u32 data;

	val = clamp_t(u64, val, CCU_DIV_CLKDIV_MIN,
		      CCU_DIV_CLKDIV_MAX(div->mask));
	data = ccu_div_prep(div->mask, val);

	spin_lock_irqsave(&div->lock, flags);
	regmap_update_bits(div->sys_regs, div->reg_ctl, div->mask, data);
	spin_unlock_irqrestore(&div->lock, flags);

	return 0;
}

#define ccu_div_dbgfs_mode 0644

#else /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */

/* Read-only DebugFS: no setters, world-readable mode. */
#define ccu_div_dbgfs_bit_set NULL
#define ccu_div_dbgfs_var_clkdiv_set NULL
#define ccu_div_dbgfs_mode 0444

#endif /* !CCU_DIV_ALLOW_WRITE_DEBUGFS */
380 static int ccu_div_dbgfs_bit_get(void *priv, u64 *val)
382 const struct ccu_div_dbgfs_bit *bit = priv;
383 struct ccu_div *div = bit->div;
384 u32 data = 0;
386 regmap_read(div->sys_regs, div->reg_ctl, &data);
387 *val = !!(data & bit->mask);
389 return 0;
391 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_bit_fops,
392 ccu_div_dbgfs_bit_get, ccu_div_dbgfs_bit_set, "%llu\n");
394 static int ccu_div_dbgfs_var_clkdiv_get(void *priv, u64 *val)
396 struct ccu_div *div = priv;
397 u32 data = 0;
399 regmap_read(div->sys_regs, div->reg_ctl, &data);
400 *val = ccu_div_get(div->mask, data);
402 return 0;
404 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_var_clkdiv_fops,
405 ccu_div_dbgfs_var_clkdiv_get, ccu_div_dbgfs_var_clkdiv_set, "%llu\n");
407 static int ccu_div_dbgfs_fixed_clkdiv_get(void *priv, u64 *val)
409 struct ccu_div *div = priv;
411 *val = div->divider;
413 return 0;
415 DEFINE_DEBUGFS_ATTRIBUTE(ccu_div_dbgfs_fixed_clkdiv_fops,
416 ccu_div_dbgfs_fixed_clkdiv_get, NULL, "%llu\n");
418 static void ccu_div_var_debug_init(struct clk_hw *hw, struct dentry *dentry)
420 struct ccu_div *div = to_ccu_div(hw);
421 struct ccu_div_dbgfs_bit *bits;
422 int didx, bidx, num = 2;
423 const char *name;
425 num += !!(div->flags & CLK_SET_RATE_GATE) +
426 !!(div->features & CCU_DIV_RESET_DOMAIN);
428 bits = kcalloc(num, sizeof(*bits), GFP_KERNEL);
429 if (!bits)
430 return;
432 for (didx = 0, bidx = 0; bidx < CCU_DIV_DBGFS_BIT_NUM; ++bidx) {
433 name = ccu_div_bits[bidx].name;
434 if (!(div->flags & CLK_SET_RATE_GATE) &&
435 !strcmp("div_en", name)) {
436 continue;
439 if (!(div->features & CCU_DIV_RESET_DOMAIN) &&
440 !strcmp("div_rst", name)) {
441 continue;
444 bits[didx] = ccu_div_bits[bidx];
445 bits[didx].div = div;
447 if (div->features & CCU_DIV_LOCK_SHIFTED &&
448 !strcmp("div_lock", name)) {
449 bits[didx].mask = CCU_DIV_CTL_LOCK_SHIFTED;
452 debugfs_create_file_unsafe(bits[didx].name, ccu_div_dbgfs_mode,
453 dentry, &bits[didx],
454 &ccu_div_dbgfs_bit_fops);
455 ++didx;
458 debugfs_create_file_unsafe("div_clkdiv", ccu_div_dbgfs_mode, dentry,
459 div, &ccu_div_dbgfs_var_clkdiv_fops);
462 static void ccu_div_gate_debug_init(struct clk_hw *hw, struct dentry *dentry)
464 struct ccu_div *div = to_ccu_div(hw);
465 struct ccu_div_dbgfs_bit *bit;
467 bit = kmalloc(sizeof(*bit), GFP_KERNEL);
468 if (!bit)
469 return;
471 *bit = ccu_div_bits[0];
472 bit->div = div;
473 debugfs_create_file_unsafe(bit->name, ccu_div_dbgfs_mode, dentry, bit,
474 &ccu_div_dbgfs_bit_fops);
476 debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
477 &ccu_div_dbgfs_fixed_clkdiv_fops);
480 static void ccu_div_fixed_debug_init(struct clk_hw *hw, struct dentry *dentry)
482 struct ccu_div *div = to_ccu_div(hw);
484 debugfs_create_file_unsafe("div_clkdiv", 0400, dentry, div,
485 &ccu_div_dbgfs_fixed_clkdiv_fops);
488 #else /* !CONFIG_DEBUG_FS */
490 #define ccu_div_var_debug_init NULL
491 #define ccu_div_gate_debug_init NULL
492 #define ccu_div_fixed_debug_init NULL
494 #endif /* !CONFIG_DEBUG_FS */
/* Variable divider whose rate may only be changed while gated. */
static const struct clk_ops ccu_div_var_gate_to_set_ops = {
	.enable = ccu_div_var_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_fast,
	.debug_init = ccu_div_var_debug_init
};

/* Variable divider with on-the-fly rate change and no gate control. */
static const struct clk_ops ccu_div_var_nogate_ops = {
	.recalc_rate = ccu_div_var_recalc_rate,
	.round_rate = ccu_div_var_round_rate,
	.set_rate = ccu_div_var_set_rate_slow,
	.debug_init = ccu_div_var_debug_init
};

/* Gateable clock with a fixed divider. */
static const struct clk_ops ccu_div_gate_ops = {
	.enable = ccu_div_gate_enable,
	.disable = ccu_div_gate_disable,
	.is_enabled = ccu_div_gate_is_enabled,
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_gate_debug_init
};

/* Always-on clock with a fixed divider. */
static const struct clk_ops ccu_div_fixed_ops = {
	.recalc_rate = ccu_div_fixed_recalc_rate,
	.round_rate = ccu_div_fixed_round_rate,
	.set_rate = ccu_div_fixed_set_rate,
	.debug_init = ccu_div_fixed_debug_init
};
530 struct ccu_div *ccu_div_hw_register(const struct ccu_div_init_data *div_init)
532 struct clk_parent_data parent_data = { };
533 struct clk_init_data hw_init = { };
534 struct ccu_div *div;
535 int ret;
537 if (!div_init)
538 return ERR_PTR(-EINVAL);
540 div = kzalloc(sizeof(*div), GFP_KERNEL);
541 if (!div)
542 return ERR_PTR(-ENOMEM);
545 * Note since Baikal-T1 System Controller registers are MMIO-backed
546 * we won't check the regmap IO operations return status, because it
547 * must be zero anyway.
549 div->hw.init = &hw_init;
550 div->id = div_init->id;
551 div->reg_ctl = div_init->base + CCU_DIV_CTL;
552 div->sys_regs = div_init->sys_regs;
553 div->flags = div_init->flags;
554 div->features = div_init->features;
555 spin_lock_init(&div->lock);
557 hw_init.name = div_init->name;
558 hw_init.flags = div_init->flags;
560 if (div_init->type == CCU_DIV_VAR) {
561 if (hw_init.flags & CLK_SET_RATE_GATE)
562 hw_init.ops = &ccu_div_var_gate_to_set_ops;
563 else
564 hw_init.ops = &ccu_div_var_nogate_ops;
565 div->mask = CCU_DIV_CTL_CLKDIV_MASK(div_init->width);
566 } else if (div_init->type == CCU_DIV_GATE) {
567 hw_init.ops = &ccu_div_gate_ops;
568 div->divider = div_init->divider;
569 } else if (div_init->type == CCU_DIV_FIXED) {
570 hw_init.ops = &ccu_div_fixed_ops;
571 div->divider = div_init->divider;
572 } else {
573 ret = -EINVAL;
574 goto err_free_div;
577 if (!div_init->parent_name) {
578 ret = -EINVAL;
579 goto err_free_div;
581 parent_data.fw_name = div_init->parent_name;
582 hw_init.parent_data = &parent_data;
583 hw_init.num_parents = 1;
585 ret = of_clk_hw_register(div_init->np, &div->hw);
586 if (ret)
587 goto err_free_div;
589 return div;
591 err_free_div:
592 kfree(div);
594 return ERR_PTR(ret);
597 void ccu_div_hw_unregister(struct ccu_div *div)
599 clk_hw_unregister(&div->hw);
601 kfree(div);