/*
 * OMAP DPLL clock support
 *
 * Copyright (C) 2013 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include "clock.h"

#undef pr_fmt
#define pr_fmt(fmt) "%s: " fmt, __func__

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
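/*
 * clk_ops for OMAP4-family DPLLs that carry the REGM4XEN bit: when that bit
 * is set the hardware applies an extra x4 factor on top of the programmed
 * multiplier, which is why these clocks use the dedicated regm4xen
 * recalc/round/determine_rate callbacks instead of the plain noncore ones.
 */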
static const struct clk_ops dpll_m4xen_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap4_dpll_regm4xen_recalc,
	.round_rate = &omap4_dpll_regm4xen_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap4_dpll_regm4xen_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};
#else
static const struct clk_ops dpll_m4xen_ck_ops = {};
#endif

#if defined(CONFIG_ARCH_OMAP3) || defined(CONFIG_ARCH_OMAP4) || \
	defined(CONFIG_SOC_OMAP5) || defined(CONFIG_SOC_DRA7XX) || \
	defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static const struct clk_ops dpll_core_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.get_parent = &omap2_init_dpll_parent,
};

static const struct clk_ops dpll_no_gate_ck_ops = {
	.recalc_rate = &omap3_dpll_recalc,
	.get_parent = &omap2_init_dpll_parent,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
};
#else
static const struct clk_ops dpll_core_ck_ops = {};
static const struct clk_ops dpll_ck_ops = {};
static const struct clk_ops dpll_no_gate_ck_ops = {};
const struct clk_hw_omap_ops clkhwops_omap3_dpll = {};
#endif

#ifdef CONFIG_ARCH_OMAP2
static const struct clk_ops omap2_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap2_dpllcore_recalc,
	.round_rate = &omap2_dpll_round_rate,
	.set_rate = &omap2_reprogram_dpllcore,
};
#else
static const struct clk_ops omap2_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_core_ck_ops = {
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.round_rate = &omap2_dpll_round_rate,
};
#else
static const struct clk_ops omap3_dpll_core_ck_ops = {};
#endif

#ifdef CONFIG_ARCH_OMAP3
static const struct clk_ops omap3_dpll_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_noncore_dpll_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll5_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll5_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_noncore_dpll_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};

static const struct clk_ops omap3_dpll_per_ck_ops = {
	.enable = &omap3_noncore_dpll_enable,
	.disable = &omap3_noncore_dpll_disable,
	.get_parent = &omap2_init_dpll_parent,
	.recalc_rate = &omap3_dpll_recalc,
	.set_rate = &omap3_dpll4_set_rate,
	.set_parent = &omap3_noncore_dpll_set_parent,
	.set_rate_and_parent = &omap3_dpll4_set_rate_and_parent,
	.determine_rate = &omap3_noncore_dpll_determine_rate,
	.round_rate = &omap2_dpll_round_rate,
};
#endif

static const struct clk_ops dpll_x2_ck_ops = {
	.recalc_rate = &omap3_clkoutx2_recalc,
};

/**
 * _register_dpll - low level registration of a DPLL clock
 * @hw: hardware clock definition for the clock
 * @node: device node for the clock
 *
 * Finalizes the DPLL registration process. If a failure occurs (clk-ref or
 * clk-bypass is missing), the clock is added to the retry list and
 * initialization is retried at a later stage.
 */
static void __init _register_dpll(struct clk_hw *hw,
				  struct device_node *node)
{
	struct clk_hw_omap *clk_hw = to_clk_hw_omap(hw);
	struct dpll_data *dd = clk_hw->dpll_data;
	struct clk *clk;

	clk = of_clk_get(node, 0);
	if (IS_ERR(clk)) {
		pr_debug("clk-ref missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_ref = __clk_get_hw(clk);

	clk = of_clk_get(node, 1);

	if (IS_ERR(clk)) {
		pr_debug("clk-bypass missing for %s, retry later\n",
			 node->name);
		if (!ti_clk_retry_init(node, hw, _register_dpll))
			return;

		goto cleanup;
	}

	dd->clk_bypass = __clk_get_hw(clk);

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk)) {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
		kfree(clk_hw->hw.init->parent_names);
		kfree(clk_hw->hw.init);
		return;
	}

cleanup:
	kfree(clk_hw->dpll_data);
	kfree(clk_hw->hw.init->parent_names);
	kfree(clk_hw->hw.init);
	kfree(clk_hw);
}
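
/*
 * The two of_clk_get() calls above map onto the DPLL node's "clocks"
 * property: entry 0 is used as clk-ref and entry 1 as clk-bypass, e.g.
 * (clock names below are placeholders, not from a real dts):
 *
 *	clocks = <&sys_clkin_ck>, <&dpll_example_byp_ck>;
 *
 * If either parent is not registered yet, the node is queued through
 * ti_clk_retry_init() and _register_dpll() is invoked again later.
 */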

#if defined(CONFIG_ARCH_OMAP3) && defined(CONFIG_ATAGS)
static void __iomem *_get_reg(u8 module, u16 offset)
{
	u32 reg;
	struct clk_omap_reg *reg_setup;

	reg_setup = (struct clk_omap_reg *)&reg;

	reg_setup->index = module;
	reg_setup->offset = offset;

	return (void __iomem *)reg;
}

struct clk *ti_clk_register_dpll(struct ti_clk *setup)
{
	struct clk_hw_omap *clk_hw;
	struct clk_init_data init = { NULL };
	struct dpll_data *dd;
	struct clk *clk;
	struct ti_clk_dpll *dpll;
	const struct clk_ops *ops = &omap3_dpll_ck_ops;
	struct clk *clk_ref;
	struct clk *clk_bypass;

	dpll = setup->data;

	if (dpll->num_parents < 2)
		return ERR_PTR(-EINVAL);

	clk_ref = clk_get_sys(NULL, dpll->parents[0]);
	clk_bypass = clk_get_sys(NULL, dpll->parents[1]);

	if (IS_ERR_OR_NULL(clk_ref) || IS_ERR_OR_NULL(clk_bypass))
		return ERR_PTR(-EAGAIN);

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!dd || !clk_hw) {
		clk = ERR_PTR(-ENOMEM);
		goto cleanup;
	}

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = &init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init.name = setup->name;
	init.ops = ops;

	init.num_parents = dpll->num_parents;
	init.parent_names = dpll->parents;

	dd->control_reg = _get_reg(dpll->module, dpll->control_reg);
	dd->idlest_reg = _get_reg(dpll->module, dpll->idlest_reg);
	dd->mult_div1_reg = _get_reg(dpll->module, dpll->mult_div1_reg);
	dd->autoidle_reg = _get_reg(dpll->module, dpll->autoidle_reg);

	dd->modes = dpll->modes;
	dd->div1_mask = dpll->div1_mask;
	dd->idlest_mask = dpll->idlest_mask;
	dd->mult_mask = dpll->mult_mask;
	dd->autoidle_mask = dpll->autoidle_mask;
	dd->enable_mask = dpll->enable_mask;
	dd->sddiv_mask = dpll->sddiv_mask;
	dd->dco_mask = dpll->dco_mask;
	dd->max_divider = dpll->max_divider;
	dd->min_divider = dpll->min_divider;
	dd->max_multiplier = dpll->max_multiplier;
	dd->auto_recal_bit = dpll->auto_recal_bit;
	dd->recal_en_bit = dpll->recal_en_bit;
	dd->recal_st_bit = dpll->recal_st_bit;

	dd->clk_ref = __clk_get_hw(clk_ref);
	dd->clk_bypass = __clk_get_hw(clk_bypass);

	if (dpll->flags & CLKF_CORE)
		ops = &omap3_dpll_core_ck_ops;

	if (dpll->flags & CLKF_PER)
		ops = &omap3_dpll_per_ck_ops;

	if (dpll->flags & CLKF_J_TYPE)
		dd->flags |= DPLL_J_TYPE;

	clk = clk_register(NULL, &clk_hw->hw);

	if (!IS_ERR(clk))
		return clk;

cleanup:
	kfree(dd);
	kfree(clk_hw);
	return clk;
}
#endif
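
/*
 * Illustrative (non-DT, legacy boot) usage sketch for ti_clk_register_dpll()
 * above; every identifier and value here is a placeholder rather than real
 * OMAP3 clock data, and only fields consumed by the function are shown:
 *
 *	static struct ti_clk_dpll dpll_example_data = {
 *		.num_parents = 2,
 *		.parents = (const char *[]){ "sys_ck", "sys_ck" },
 *		.control_reg = 0xd00,	// placeholder register offsets
 *		.idlest_reg = 0xd20,
 *		.mult_div1_reg = 0xd40,
 *		.autoidle_reg = 0xd30,
 *		.enable_mask = 0x7,
 *		.idlest_mask = 0x1,
 *		.mult_mask = 0x7ff << 8,
 *		.div1_mask = 0x7f,
 *		.max_multiplier = 2047,
 *		.max_divider = 128,
 *		.min_divider = 1,
 *		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
 *	};
 *
 *	static struct ti_clk dpll_example = {
 *		.name = "dpll_example_ck",
 *		.data = &dpll_example_data,
 *	};
 *
 *	clk = ti_clk_register_dpll(&dpll_example);
 */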

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
	defined(CONFIG_SOC_AM43XX)
/**
 * _register_dpll_x2 - Registers a DPLLx2 clock
 * @node: device node for this clock
 * @ops: clk_ops for this clock
 * @hw_ops: clk_hw_ops for this clock
 *
 * Initializes a DPLL x 2 clock from device tree data.
 */
static void _register_dpll_x2(struct device_node *node,
			      const struct clk_ops *ops,
			      const struct clk_hw_omap_ops *hw_ops)
{
	struct clk *clk;
	struct clk_init_data init = { NULL };
	struct clk_hw_omap *clk_hw;
	const char *name = node->name;
	const char *parent_name;

	parent_name = of_clk_get_parent_name(node, 0);
	if (!parent_name) {
		pr_err("%s must have parent\n", node->name);
		return;
	}

	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	if (!clk_hw)
		return;

	clk_hw->ops = hw_ops;
	clk_hw->hw.init = &init;

	init.name = name;
	init.ops = ops;
	init.parent_names = &parent_name;
	init.num_parents = 1;

	/* register the clock */
	clk = clk_register(NULL, &clk_hw->hw);

	if (IS_ERR(clk)) {
		kfree(clk_hw);
	} else {
		omap2_init_clk_hw_omap_clocks(&clk_hw->hw);
		of_clk_add_provider(node, of_clk_src_simple_get, clk);
	}
}
#endif

/**
 * of_ti_dpll_setup - Setup function for OMAP DPLL clocks
 * @node: device node containing the DPLL info
 * @ops: ops for the DPLL
 * @ddt: DPLL data template to use
 *
 * Initializes a DPLL clock from device tree data.
 */
static void __init of_ti_dpll_setup(struct device_node *node,
				    const struct clk_ops *ops,
				    const struct dpll_data *ddt)
{
	struct clk_hw_omap *clk_hw = NULL;
	struct clk_init_data *init = NULL;
	const char **parent_names = NULL;
	struct dpll_data *dd = NULL;
	u8 dpll_mode = 0;

	dd = kzalloc(sizeof(*dd), GFP_KERNEL);
	clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
	init = kzalloc(sizeof(*init), GFP_KERNEL);
	if (!dd || !clk_hw || !init)
		goto cleanup;

	memcpy(dd, ddt, sizeof(*dd));

	clk_hw->dpll_data = dd;
	clk_hw->ops = &clkhwops_omap3_dpll;
	clk_hw->hw.init = init;
	clk_hw->flags = MEMMAP_ADDRESSING;

	init->name = node->name;
	init->ops = ops;

	init->num_parents = of_clk_get_parent_count(node);
	if (!init->num_parents) {
		pr_err("%s must have parent(s)\n", node->name);
		goto cleanup;
	}

	parent_names = kzalloc(sizeof(char *) * init->num_parents, GFP_KERNEL);
	if (!parent_names)
		goto cleanup;

	of_clk_parent_fill(node, parent_names, init->num_parents);

	init->parent_names = parent_names;

	dd->control_reg = ti_clk_get_reg_addr(node, 0);

	/*
	 * Special case for the OMAP2 DPLL: the register order is different
	 * because idlest_reg is missing, and the clkhwops differ as well.
	 * Detected from the missing idlest_mask.
	 */
	if (!dd->idlest_mask) {
		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 1);
#ifdef CONFIG_ARCH_OMAP2
		clk_hw->ops = &clkhwops_omap2xxx_dpll;
		omap2xxx_clkt_dpllcore_init(&clk_hw->hw);
#endif
	} else {
		dd->idlest_reg = ti_clk_get_reg_addr(node, 1);
		if (IS_ERR(dd->idlest_reg))
			goto cleanup;

		dd->mult_div1_reg = ti_clk_get_reg_addr(node, 2);
	}

	if (IS_ERR(dd->control_reg) || IS_ERR(dd->mult_div1_reg))
		goto cleanup;

	if (dd->autoidle_mask) {
		dd->autoidle_reg = ti_clk_get_reg_addr(node, 3);
		if (IS_ERR(dd->autoidle_reg))
			goto cleanup;
	}

	if (of_property_read_bool(node, "ti,low-power-stop"))
		dpll_mode |= 1 << DPLL_LOW_POWER_STOP;

	if (of_property_read_bool(node, "ti,low-power-bypass"))
		dpll_mode |= 1 << DPLL_LOW_POWER_BYPASS;

	if (of_property_read_bool(node, "ti,lock"))
		dpll_mode |= 1 << DPLL_LOCKED;

	if (dpll_mode)
		dd->modes = dpll_mode;

	_register_dpll(&clk_hw->hw, node);
	return;

cleanup:
	kfree(dd);
	kfree(parent_names);
	kfree(init);
	kfree(clk_hw);
}
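
/*
 * Illustrative device tree fragment for the parsing done above (register
 * offsets and clock names are placeholders, not taken from a real SoC):
 *
 *	dpll_example_ck: dpll_example_ck@160 {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap4-dpll-clock";
 *		clocks = <&sys_clkin_ck>, <&dpll_example_byp_ck>;
 *		// reg entries in the order read above: control, idlest,
 *		// mult-div1, and autoidle when the template sets an
 *		// autoidle_mask
 *		reg = <0x160>, <0x164>, <0x16c>, <0x168>;
 *		// optional mode flags parsed with of_property_read_bool()
 *		ti,low-power-bypass;
 *	};
 *
 * OMAP2 core DPLL nodes (no idlest register) carry only two reg entries,
 * control followed by mult-div1, as handled in the special case above.
 */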

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, &clkhwops_omap4_dpllmx);
}
CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
	       of_ti_omap4_dpll_x2_setup);
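
/*
 * Illustrative x2 node (names are placeholders): a single parent is
 * mandatory and no reg entries are needed, since the x2 output derives its
 * rate from the parent DPLL via omap3_clkoutx2_recalc():
 *
 *	dpll_example_x2_ck: dpll_example_x2_ck {
 *		#clock-cells = <0>;
 *		compatible = "ti,omap4-dpll-x2-clock";
 *		clocks = <&dpll_example_ck>;
 *	};
 */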
#endif

#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
{
	_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
}
CLK_OF_DECLARE(ti_am3_dpll_x2_clock, "ti,am3-dpll-x2-clock",
	       of_ti_am3_dpll_x2_setup);
#endif

#ifdef CONFIG_ARCH_OMAP3
static void __init of_ti_omap3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	if ((of_machine_is_compatible("ti,omap3630") ||
	     of_machine_is_compatible("ti,omap36xx")) &&
	    !strcmp(node->name, "dpll5_ck"))
		of_ti_dpll_setup(node, &omap3_dpll5_ck_ops, &dd);
	else
		of_ti_dpll_setup(node, &omap3_dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_dpll_clock, "ti,omap3-dpll-clock",
	       of_ti_omap3_dpll_setup);

static void __init of_ti_omap3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 16,
		.div1_mask = 0x7f << 8,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf0,
	};

	of_ti_dpll_setup(node, &omap3_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_core_dpll_clock, "ti,omap3-dpll-core-clock",
	       of_ti_omap3_core_dpll_setup);

static void __init of_ti_omap3_per_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.freqsel_mask = 0xf00000,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_dpll_clock, "ti,omap3-dpll-per-clock",
	       of_ti_omap3_per_dpll_setup);

static void __init of_ti_omap3_per_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1 << 1,
		.enable_mask = 0x7 << 16,
		.autoidle_mask = 0x7 << 3,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 128,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.dco_mask = 0xe << 20,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_STOP) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &omap3_dpll_per_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap3_per_jtype_dpll_clock, "ti,omap3-dpll-per-j-type-clock",
	       of_ti_omap3_per_jtype_dpll_setup);
#endif

static void __init of_ti_omap4_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_dpll_clock, "ti,omap4-dpll-clock",
	       of_ti_omap4_dpll_setup);

static void __init of_ti_omap5_mpu_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.dcc_mask = BIT(22),
		.dcc_rate = 1400000000, /* DCC beyond 1.4GHz */
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(of_ti_omap5_mpu_dpll_clock, "ti,omap5-mpu-dpll-clock",
	       of_ti_omap5_mpu_dpll_setup);

static void __init of_ti_omap4_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_core_dpll_clock, "ti,omap4-dpll-core-clock",
	       of_ti_omap4_core_dpll_setup);

#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
	defined(CONFIG_SOC_DRA7XX)
static void __init of_ti_omap4_m4xen_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.m4xen_mask = 0x800,
		.lpmode_mask = 1 << 10,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_m4xen_dpll_clock, "ti,omap4-dpll-m4xen-clock",
	       of_ti_omap4_m4xen_dpll_setup);

static void __init of_ti_omap4_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.autoidle_mask = 0x7,
		.mult_mask = 0xfff << 8,
		.div1_mask = 0xff,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 1,
		.sddiv_mask = 0xff << 24,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_m4xen_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap4_jtype_dpll_clock, "ti,omap4-dpll-j-type-clock",
	       of_ti_omap4_jtype_dpll_setup);
#endif

static void __init of_ti_am3_no_gate_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_dpll_clock, "ti,am3-dpll-no-gate-clock",
	       of_ti_am3_no_gate_dpll_setup);

static void __init of_ti_am3_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 4095,
		.max_divider = 256,
		.min_divider = 2,
		.flags = DPLL_J_TYPE,
		.max_rate = 2000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_jtype_dpll_clock, "ti,am3-dpll-j-type-clock",
	       of_ti_am3_jtype_dpll_setup);

static void __init of_ti_am3_no_gate_jtype_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 2000000000,
		.flags = DPLL_J_TYPE,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_no_gate_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_no_gate_jtype_dpll_clock,
	       "ti,am3-dpll-no-gate-j-type-clock",
	       of_ti_am3_no_gate_jtype_dpll_setup);

static void __init of_ti_am3_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_dpll_clock, "ti,am3-dpll-clock", of_ti_am3_dpll_setup);

static void __init of_ti_am3_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.idlest_mask = 0x1,
		.enable_mask = 0x7,
		.mult_mask = 0x7ff << 8,
		.div1_mask = 0x7f,
		.max_multiplier = 2047,
		.max_divider = 128,
		.min_divider = 1,
		.max_rate = 1000000000,
		.modes = (1 << DPLL_LOW_POWER_BYPASS) | (1 << DPLL_LOCKED),
	};

	of_ti_dpll_setup(node, &dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_am3_core_dpll_clock, "ti,am3-dpll-core-clock",
	       of_ti_am3_core_dpll_setup);

static void __init of_ti_omap2_core_dpll_setup(struct device_node *node)
{
	const struct dpll_data dd = {
		.enable_mask = 0x3,
		.mult_mask = 0x3ff << 12,
		.div1_mask = 0xf << 8,
		.max_divider = 16,
		.min_divider = 1,
	};

	of_ti_dpll_setup(node, &omap2_dpll_core_ck_ops, &dd);
}
CLK_OF_DECLARE(ti_omap2_core_dpll_clock, "ti,omap2-dpll-core-clock",
	       of_ti_omap2_core_dpll_setup);