drivers/clk/rockchip/clk.c

/*
 * Copyright (c) 2014 MundoReader S.L.
 * Author: Heiko Stuebner <heiko@sntech.de>
 *
 * based on
 *
 * samsung/clk.c
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 * Copyright (c) 2013 Linaro Ltd.
 * Author: Thomas Abraham <thomas.ab@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
#include <linux/reboot.h>
#include "clk.h"

/**
 * Register a clock branch.
 * Most clock branches have a form like
 *
 * src1 --|--\
 *        |M |--[GATE]-[DIV]-
 * src2 --|--/
 *
 * sometimes without one of those components.
 */
static struct clk *rockchip_clk_register_branch(const char *name,
                const char *const *parent_names, u8 num_parents, void __iomem *base,
                int muxdiv_offset, u8 mux_shift, u8 mux_width, u8 mux_flags,
                u8 div_shift, u8 div_width, u8 div_flags,
                struct clk_div_table *div_table, int gate_offset,
                u8 gate_shift, u8 gate_flags, unsigned long flags,
                spinlock_t *lock)
{
        struct clk *clk;
        struct clk_mux *mux = NULL;
        struct clk_gate *gate = NULL;
        struct clk_divider *div = NULL;
        const struct clk_ops *mux_ops = NULL, *div_ops = NULL,
                             *gate_ops = NULL;

        if (num_parents > 1) {
                mux = kzalloc(sizeof(*mux), GFP_KERNEL);
                if (!mux)
                        return ERR_PTR(-ENOMEM);

                mux->reg = base + muxdiv_offset;
                mux->shift = mux_shift;
                mux->mask = BIT(mux_width) - 1;
                mux->flags = mux_flags;
                mux->lock = lock;
                mux_ops = (mux_flags & CLK_MUX_READ_ONLY) ? &clk_mux_ro_ops
                                                          : &clk_mux_ops;
        }

        if (gate_offset >= 0) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
                        return ERR_PTR(-ENOMEM);

                gate->flags = gate_flags;
                gate->reg = base + gate_offset;
                gate->bit_idx = gate_shift;
                gate->lock = lock;
                gate_ops = &clk_gate_ops;
        }

        if (div_width > 0) {
                div = kzalloc(sizeof(*div), GFP_KERNEL);
                if (!div)
                        return ERR_PTR(-ENOMEM);

                div->flags = div_flags;
                div->reg = base + muxdiv_offset;
                div->shift = div_shift;
                div->width = div_width;
                div->lock = lock;
                div->table = div_table;
                div_ops = &clk_divider_ops;
        }

        clk = clk_register_composite(NULL, name, parent_names, num_parents,
                                     mux ? &mux->hw : NULL, mux_ops,
                                     div ? &div->hw : NULL, div_ops,
                                     gate ? &gate->hw : NULL, gate_ops,
                                     flags);

        return clk;
}

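/*
 * Register a fractional divider branch: an optional gate in front of a
 * fractional divider whose 16-bit numerator (upper register half) and
 * denominator (lower register half) share the register at muxdiv_offset.
 */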
static struct clk *rockchip_clk_register_frac_branch(const char *name,
                const char *const *parent_names, u8 num_parents,
                void __iomem *base, int muxdiv_offset, u8 div_flags,
                int gate_offset, u8 gate_shift, u8 gate_flags,
                unsigned long flags, spinlock_t *lock)
{
        struct clk *clk;
        struct clk_gate *gate = NULL;
        struct clk_fractional_divider *div = NULL;
        const struct clk_ops *div_ops = NULL, *gate_ops = NULL;

        if (gate_offset >= 0) {
                gate = kzalloc(sizeof(*gate), GFP_KERNEL);
                if (!gate)
                        return ERR_PTR(-ENOMEM);

                gate->flags = gate_flags;
                gate->reg = base + gate_offset;
                gate->bit_idx = gate_shift;
                gate->lock = lock;
                gate_ops = &clk_gate_ops;
        }

        if (muxdiv_offset < 0)
                return ERR_PTR(-EINVAL);

        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div)
                return ERR_PTR(-ENOMEM);

        div->flags = div_flags;
        div->reg = base + muxdiv_offset;
        div->mshift = 16;
        div->mwidth = 16;
        div->mmask = GENMASK(div->mwidth - 1, 0) << div->mshift;
        div->nshift = 0;
        div->nwidth = 16;
        div->nmask = GENMASK(div->nwidth - 1, 0) << div->nshift;
        div->lock = lock;
        div_ops = &clk_fractional_divider_ops;

        clk = clk_register_composite(NULL, name, parent_names, num_parents,
                                     NULL, NULL,
                                     &div->hw, div_ops,
                                     gate ? &gate->hw : NULL, gate_ops,
                                     flags);

        return clk;
}

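/*
 * Per-SoC state shared by the registration helpers below: the spinlock
 * serializing register accesses, the clock lookup table exposed through the
 * OF provider, the CRU register base, the CRU device node and the lazily
 * resolved GRF regmap.
 */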
static DEFINE_SPINLOCK(clk_lock);
static struct clk **clk_table;
static void __iomem *reg_base;
static struct clk_onecell_data clk_data;
static struct device_node *cru_node;
static struct regmap *grf;

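/*
 * Initialize the shared state above: remember the CRU node and register
 * base, allocate the clock lookup table and register it as a onecell clock
 * provider for the device tree.
 */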
void __init rockchip_clk_init(struct device_node *np, void __iomem *base,
                              unsigned long nr_clks)
{
        reg_base = base;
        cru_node = np;
        grf = ERR_PTR(-EPROBE_DEFER);

        clk_table = kcalloc(nr_clks, sizeof(struct clk *), GFP_KERNEL);
        if (!clk_table)
                pr_err("%s: could not allocate clock lookup table\n", __func__);

        clk_data.clks = clk_table;
        clk_data.clk_num = nr_clks;
        of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data);
}

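/*
 * Resolve the "rockchip,grf" syscon regmap on first use; until that
 * succeeds the cached value stays at -EPROBE_DEFER so callers can retry.
 */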
struct regmap *rockchip_clk_get_grf(void)
{
        if (IS_ERR(grf))
                grf = syscon_regmap_lookup_by_phandle(cru_node, "rockchip,grf");
        return grf;
}

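/* Store a registered clock in the lookup table; id 0 means "no id". */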
void rockchip_clk_add_lookup(struct clk *clk, unsigned int id)
{
        if (clk_table && id)
                clk_table[id] = clk;
}

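/*
 * Register a table of PLLs. A failed entry is only logged, so the remaining
 * PLLs in the table still get registered.
 */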
void __init rockchip_clk_register_plls(struct rockchip_pll_clock *list,
                                       unsigned int nr_pll, int grf_lock_offset)
{
        struct clk *clk;
        int idx;

        for (idx = 0; idx < nr_pll; idx++, list++) {
                clk = rockchip_clk_register_pll(list->type, list->name,
                                list->parent_names, list->num_parents,
                                reg_base, list->con_offset, grf_lock_offset,
                                list->lock_shift, list->mode_offset,
                                list->mode_shift, list->rate_table,
                                list->pll_flags, &clk_lock);
                if (IS_ERR(clk)) {
                        pr_err("%s: failed to register clock %s\n", __func__,
                                list->name);
                        continue;
                }

                rockchip_clk_add_lookup(clk, list->id);
        }
}

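/*
 * Register a table of clock branches. Each entry is dispatched by its
 * branch_type to the matching registration helper (mux, divider, fractional
 * divider, gate, composite, MMC phase clock or inverter) and the result is
 * added to the lookup table.
 */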
void __init rockchip_clk_register_branches(
                                      struct rockchip_clk_branch *list,
                                      unsigned int nr_clk)
{
        struct clk *clk = NULL;
        unsigned int idx;
        unsigned long flags;

        for (idx = 0; idx < nr_clk; idx++, list++) {
                flags = list->flags;

                /* catch simple muxes */
                switch (list->branch_type) {
                case branch_mux:
                        clk = clk_register_mux(NULL, list->name,
                                list->parent_names, list->num_parents,
                                flags, reg_base + list->muxdiv_offset,
                                list->mux_shift, list->mux_width,
                                list->mux_flags, &clk_lock);
                        break;
                case branch_divider:
                        if (list->div_table)
                                clk = clk_register_divider_table(NULL,
                                        list->name, list->parent_names[0],
                                        flags, reg_base + list->muxdiv_offset,
                                        list->div_shift, list->div_width,
                                        list->div_flags, list->div_table,
                                        &clk_lock);
                        else
                                clk = clk_register_divider(NULL, list->name,
                                        list->parent_names[0], flags,
                                        reg_base + list->muxdiv_offset,
                                        list->div_shift, list->div_width,
                                        list->div_flags, &clk_lock);
                        break;
                case branch_fraction_divider:
                        clk = rockchip_clk_register_frac_branch(list->name,
                                list->parent_names, list->num_parents,
                                reg_base, list->muxdiv_offset, list->div_flags,
                                list->gate_offset, list->gate_shift,
                                list->gate_flags, flags, &clk_lock);
                        break;
                case branch_gate:
                        flags |= CLK_SET_RATE_PARENT;

                        clk = clk_register_gate(NULL, list->name,
                                list->parent_names[0], flags,
                                reg_base + list->gate_offset,
                                list->gate_shift, list->gate_flags, &clk_lock);
                        break;
                case branch_composite:
                        clk = rockchip_clk_register_branch(list->name,
                                list->parent_names, list->num_parents,
                                reg_base, list->muxdiv_offset, list->mux_shift,
                                list->mux_width, list->mux_flags,
                                list->div_shift, list->div_width,
                                list->div_flags, list->div_table,
                                list->gate_offset, list->gate_shift,
                                list->gate_flags, flags, &clk_lock);
                        break;
                case branch_mmc:
                        clk = rockchip_clk_register_mmc(
                                list->name,
                                list->parent_names, list->num_parents,
                                reg_base + list->muxdiv_offset,
                                list->div_shift);
                        break;
                case branch_inverter:
                        clk = rockchip_clk_register_inverter(
                                list->name, list->parent_names,
                                list->num_parents,
                                reg_base + list->muxdiv_offset,
                                list->div_shift, list->div_flags, &clk_lock);
                        break;
                }

                /* none of the cases above matched */
                if (!clk) {
                        pr_err("%s: unknown clock type %d\n",
                               __func__, list->branch_type);
                        continue;
                }

                if (IS_ERR(clk)) {
                        pr_err("%s: failed to register clock %s: %ld\n",
                               __func__, list->name, PTR_ERR(clk));
                        continue;
                }

                rockchip_clk_add_lookup(clk, list->id);
        }
}

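/*
 * Register the cpu clock (armclk) from its register description and rate
 * table and add it to the lookup table.
 */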
void __init rockchip_clk_register_armclk(unsigned int lookup_id,
                        const char *name, const char *const *parent_names,
                        u8 num_parents,
                        const struct rockchip_cpuclk_reg_data *reg_data,
                        const struct rockchip_cpuclk_rate_table *rates,
                        int nrates)
{
        struct clk *clk;

        clk = rockchip_clk_register_cpuclk(name, parent_names, num_parents,
                                           reg_data, rates, nrates, reg_base,
                                           &clk_lock);
        if (IS_ERR(clk)) {
                pr_err("%s: failed to register clock %s: %ld\n",
                       __func__, name, PTR_ERR(clk));
                return;
        }

        rockchip_clk_add_lookup(clk, lookup_id);
}

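/*
 * Look up each named critical clock and enable it, so the clock framework
 * never gates it as unused.
 */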
void __init rockchip_clk_protect_critical(const char *const clocks[],
                                          int nclocks)
{
        int i;

        /* Protect the clocks that need to stay on */
        for (i = 0; i < nclocks; i++) {
                struct clk *clk = __clk_lookup(clocks[i]);

                if (clk)
                        clk_prepare_enable(clk);
        }
}

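/*
 * Restart handler: on reboot, write the soft-reset magic value 0xfdb9 to the
 * CRU register configured via rockchip_register_restart_notifier().
 */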
static unsigned int reg_restart;
static int rockchip_restart_notify(struct notifier_block *this,
                                   unsigned long mode, void *cmd)
{
        writel(0xfdb9, reg_base + reg_restart);
        return NOTIFY_DONE;
}

static struct notifier_block rockchip_restart_handler = {
        .notifier_call = rockchip_restart_notify,
        .priority = 128,
};

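/*
 * Remember the soft-reset register offset and hook the handler above into
 * the kernel restart chain with the default priority of 128.
 */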
void __init rockchip_register_restart_notifier(unsigned int reg)
{
        int ret;

        reg_restart = reg;
        ret = register_restart_handler(&rockchip_restart_handler);
        if (ret)
                pr_err("%s: cannot register restart handler, %d\n",
                       __func__, ret);
}