// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2018 NXP.
 *   Dong Aisheng <aisheng.dong@nxp.com>
 */

#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>

#include "clk.h"
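
/*
 * struct clk_divider_gate - divider clock that is gated when its divider
 *			     field is zero
 * @divider:	base clk_divider
 * @cached_val:	divider value saved while the clock is gated and restored
 *		on enable
 */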
struct clk_divider_gate {
	struct clk_divider divider;
	u32 cached_val;
};

static inline struct clk_divider_gate *to_clk_divider_gate(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);

	return container_of(div, struct clk_divider_gate, divider);
}

static unsigned long clk_divider_gate_recalc_rate_ro(struct clk_hw *hw,
						     unsigned long parent_rate)
{
	struct clk_divider *div = to_clk_divider(hw);
	unsigned int val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}
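
/*
 * While the clock is gated the divider field in the register reads as zero,
 * so derive the rate from the cached divider value instead.
 */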
static unsigned long clk_divider_gate_recalc_rate(struct clk_hw *hw,
						  unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	unsigned int val;

	spin_lock_irqsave(div->lock, flags);

	if (!clk_hw_is_enabled(hw)) {
		val = div_gate->cached_val;
	} else {
		val = readl(div->reg) >> div->shift;
		val &= clk_div_mask(div->width);
	}

	spin_unlock_irqrestore(div->lock, flags);

	if (!val)
		return 0;

	return divider_recalc_rate(hw, parent_rate, val, div->table,
				   div->flags, div->width);
}

static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
				   unsigned long *prate)
{
	return clk_divider_ops.round_rate(hw, rate, prate);
}

static int clk_divider_gate_set_rate(struct clk_hw *hw, unsigned long rate,
				     unsigned long parent_rate)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	int value;
	u32 val;

	value = divider_get_val(rate, parent_rate, div->table,
				div->width, div->flags);
	if (value < 0)
		return value;

	spin_lock_irqsave(div->lock, flags);

	if (clk_hw_is_enabled(hw)) {
		val = readl(div->reg);
		val &= ~(clk_div_mask(div->width) << div->shift);
		val |= (u32)value << div->shift;
		writel(val, div->reg);
	} else {
		div_gate->cached_val = value;
	}

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}
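
/*
 * Ungating restores the cached divider value to the register; a cached
 * value of zero means no usable rate has been set yet, so fail.
 */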
static int clk_divider_enable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	if (!div_gate->cached_val) {
		pr_err("%s: no valid preset rate\n", clk_hw_get_name(hw));
		return -EINVAL;
	}

	spin_lock_irqsave(div->lock, flags);
	/* restore div val */
	val = readl(div->reg);
	val |= div_gate->cached_val << div->shift;
	writel(val, div->reg);

	spin_unlock_irqrestore(div->lock, flags);

	return 0;
}

static void clk_divider_disable(struct clk_hw *hw)
{
	struct clk_divider_gate *div_gate = to_clk_divider_gate(hw);
	struct clk_divider *div = to_clk_divider(hw);
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(div->lock, flags);

	/* store the current div val */
	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);
	div_gate->cached_val = val;
	writel(0, div->reg);

	spin_unlock_irqrestore(div->lock, flags);
}
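
/* The divider field doubles as the gate: a value of zero means the clock is off. */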
static int clk_divider_is_enabled(struct clk_hw *hw)
{
	struct clk_divider *div = to_clk_divider(hw);
	u32 val;

	val = readl(div->reg) >> div->shift;
	val &= clk_div_mask(div->width);

	return val ? 1 : 0;
}

static const struct clk_ops clk_divider_gate_ro_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate_ro,
	.round_rate = clk_divider_round_rate,
};

static const struct clk_ops clk_divider_gate_ops = {
	.recalc_rate = clk_divider_gate_recalc_rate,
	.round_rate = clk_divider_round_rate,
	.set_rate = clk_divider_gate_set_rate,
	.enable = clk_divider_enable,
	.disable = clk_divider_disable,
	.is_enabled = clk_divider_is_enabled,
};

/*
 * NOTE: To reuse as much code as possible from the common divider, this
 * divider also takes an extra clk_divider_flags argument. However,
 * CLK_DIVIDER_ONE_BASED is always set to match the hardware, and
 * CLK_DIVIDER_READ_ONLY is the only flag callers may specify themselves.
 */
struct clk_hw *imx_clk_hw_divider_gate(const char *name, const char *parent_name,
				       unsigned long flags, void __iomem *reg,
				       u8 shift, u8 width, u8 clk_divider_flags,
				       const struct clk_div_table *table,
				       spinlock_t *lock)
{
	struct clk_init_data init;
	struct clk_divider_gate *div_gate;
	struct clk_hw *hw;
	u32 val;
	int ret;

	div_gate = kzalloc(sizeof(*div_gate), GFP_KERNEL);
	if (!div_gate)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	if (clk_divider_flags & CLK_DIVIDER_READ_ONLY)
		init.ops = &clk_divider_gate_ro_ops;
	else
		init.ops = &clk_divider_gate_ops;
	init.flags = flags;
	init.parent_names = parent_name ? &parent_name : NULL;
	init.num_parents = parent_name ? 1 : 0;

	div_gate->divider.reg = reg;
	div_gate->divider.shift = shift;
	div_gate->divider.width = width;
	div_gate->divider.lock = lock;
	div_gate->divider.table = table;
	div_gate->divider.hw.init = &init;
	div_gate->divider.flags = CLK_DIVIDER_ONE_BASED | clk_divider_flags;
	/* cache gate status */
	val = readl(reg) >> shift;
	val &= clk_div_mask(width);
	div_gate->cached_val = val;

	hw = &div_gate->divider.hw;
	ret = clk_hw_register(NULL, hw);
	if (ret) {
		kfree(div_gate);
		hw = ERR_PTR(ret);
	}

	return hw;
}
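
/*
 * Example usage (illustration only, not part of this driver): the clock
 * names, register offset, bit field and spinlock below are hypothetical.
 *
 *	static DEFINE_SPINLOCK(imx_ccm_lock);
 *	struct clk_hw *hw;
 *
 *	hw = imx_clk_hw_divider_gate("lcd_div", "lcd_sel", CLK_SET_RATE_PARENT,
 *				     base + 0x74, 0, 6, 0, NULL, &imx_ccm_lock);
 *	if (IS_ERR(hw))
 *		pr_err("failed to register lcd_div: %ld\n", PTR_ERR(hw));
 */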