/*
 * OMAP clkctrl clock support
 *
 * Copyright (C) 2017 Texas Instruments, Inc.
 *
 * Tero Kristo <t-kristo@ti.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk-provider.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/clk/ti.h>
#include <linux/delay.h>
#include <linux/timekeeping.h>
#include "clock.h"

#define NO_IDLEST			0

#define OMAP4_MODULEMODE_MASK		0x3

#define MODULEMODE_HWCTRL		0x1
#define MODULEMODE_SWCTRL		0x2

#define OMAP4_IDLEST_MASK		(0x3 << 16)
#define OMAP4_IDLEST_SHIFT		16

#define OMAP4_STBYST_MASK		BIT(18)
#define OMAP4_STBYST_SHIFT		18

#define CLKCTRL_IDLEST_FUNCTIONAL	0x0
#define CLKCTRL_IDLEST_INTERFACE_IDLE	0x2
#define CLKCTRL_IDLEST_DISABLED		0x3

/* These timeouts are in us */
#define OMAP4_MAX_MODULE_READY_TIME	2000
#define OMAP4_MAX_MODULE_DISABLE_TIME	5000

static bool _early_timeout = true;
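
/*
 * One provider is created per clkctrl DT node. It holds the ioremapped
 * register base, the list of clocks registered for the node and the
 * default clockdomain name derived for it.
 */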
struct omap_clkctrl_provider {
        void __iomem *base;
        struct list_head clocks;
        char *clkdm_name;
};
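
/*
 * Lookup entry for a single registered clock, keyed by the clkctrl
 * register offset and bit offset used in the two-cell DT clock specifier.
 */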
struct omap_clkctrl_clk {
        struct clk_hw *clk;
        u16 reg_offset;
        int bit_offset;
        struct list_head node;
};
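
/*
 * Timeout bookkeeping: a plain cycle counter is used while timekeeping
 * is unavailable (early boot, suspend/resume), a ktime_t start stamp
 * otherwise. See _omap4_is_timeout() below.
 */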
union omap4_timeout {
        u32 cycles;
        ktime_t start;
};

static const struct omap_clkctrl_data default_clkctrl_data[] __initconst = {
        { 0 },
};
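
/* Helpers for decoding the IDLEST field of a clkctrl register */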
static u32 _omap4_idlest(u32 val)
{
        val &= OMAP4_IDLEST_MASK;
        val >>= OMAP4_IDLEST_SHIFT;

        return val;
}

static bool _omap4_is_idle(u32 val)
{
        val = _omap4_idlest(val);

        return val == CLKCTRL_IDLEST_DISABLED;
}

static bool _omap4_is_ready(u32 val)
{
        val = _omap4_idlest(val);

        return val == CLKCTRL_IDLEST_FUNCTIONAL ||
               val == CLKCTRL_IDLEST_INTERFACE_IDLE;
}

static bool _omap4_is_timeout(union omap4_timeout *time, u32 timeout)
{
        /*
         * There are two special cases where ktime_to_ns() can't be
         * used to track the timeouts. First one is during early boot
         * when the timers haven't been initialized yet. The second
         * one is during suspend-resume cycle while timekeeping is
         * being suspended / resumed. Clocksource for the system
         * can be from a timer that requires pm_runtime access, which
         * will eventually bring us here with timekeeping_suspended,
         * during both suspend entry and resume paths. This happens
         * at least on am43xx platform. Account for flakiness
         * with udelay() by multiplying the timeout value by 2.
         */
        if (unlikely(_early_timeout || timekeeping_suspended)) {
                if (time->cycles++ < timeout) {
                        udelay(1 * 2);
                        return false;
                }
        } else {
                if (!ktime_to_ns(time->start)) {
                        time->start = ktime_get();
                        return false;
                }

                if (ktime_us_delta(ktime_get(), time->start) < timeout) {
                        cpu_relax();
                        return false;
                }
        }

        return true;
}

static int __init _omap4_disable_early_timeout(void)
{
        _early_timeout = false;

        return 0;
}
arch_initcall(_omap4_disable_early_timeout);
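
/*
 * Enable a clkctrl clock: first enable the associated clockdomain if one
 * is set, then program MODULEMODE and poll IDLEST until the module reports
 * functional or interface-idle state (unless NO_IDLEST is flagged).
 */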
static int _omap4_clkctrl_clk_enable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        u32 val;
        int ret;
        union omap4_timeout timeout = { 0 };

        if (clk->clkdm) {
                ret = ti_clk_ll_ops->clkdm_clk_enable(clk->clkdm, hw->clk);
                if (ret) {
                        WARN(1,
                             "%s: could not enable %s's clockdomain %s: %d\n",
                             __func__, clk_hw_get_name(hw),
                             clk->clkdm_name, ret);
                        return ret;
                }
        }

        if (!clk->enable_bit)
                return 0;

        val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

        val &= ~OMAP4_MODULEMODE_MASK;
        val |= clk->enable_bit;

        ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

        if (test_bit(NO_IDLEST, &clk->flags))
                return 0;

        /* Wait until module is enabled */
        while (!_omap4_is_ready(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
                if (_omap4_is_timeout(&timeout, OMAP4_MAX_MODULE_READY_TIME)) {
                        pr_err("%s: failed to enable\n", clk_hw_get_name(hw));
                        return -EBUSY;
                }
        }

        return 0;
}
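
/*
 * Disable a clkctrl clock: clear the MODULEMODE field, optionally wait
 * for IDLEST to report the module as disabled, and finally release the
 * clockdomain reference taken in the enable path.
 */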
static void _omap4_clkctrl_clk_disable(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        u32 val;
        union omap4_timeout timeout = { 0 };

        if (!clk->enable_bit)
                goto exit;

        val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

        val &= ~OMAP4_MODULEMODE_MASK;

        ti_clk_ll_ops->clk_writel(val, &clk->enable_reg);

        if (test_bit(NO_IDLEST, &clk->flags))
                goto exit;

        /* Wait until module is disabled */
        while (!_omap4_is_idle(ti_clk_ll_ops->clk_readl(&clk->enable_reg))) {
                if (_omap4_is_timeout(&timeout,
                                      OMAP4_MAX_MODULE_DISABLE_TIME)) {
                        pr_err("%s: failed to disable\n", clk_hw_get_name(hw));
                        break;
                }
        }

exit:
        if (clk->clkdm)
                ti_clk_ll_ops->clkdm_clk_disable(clk->clkdm, hw->clk);
}
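
/* A clkctrl clock is reported as enabled when its MODULEMODE bits are set */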
static int _omap4_clkctrl_clk_is_enabled(struct clk_hw *hw)
{
        struct clk_hw_omap *clk = to_clk_hw_omap(hw);
        u32 val;

        val = ti_clk_ll_ops->clk_readl(&clk->enable_reg);

        if (val & clk->enable_bit)
                return 1;

        return 0;
}

static const struct clk_ops omap4_clkctrl_clk_ops = {
        .enable		= _omap4_clkctrl_clk_enable,
        .disable	= _omap4_clkctrl_clk_disable,
        .is_enabled	= _omap4_clkctrl_clk_is_enabled,
        .init		= omap2_init_clk_clkdm,
};
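
/*
 * Translate a two-cell clock specifier <reg-offset bit-offset> from DT
 * into the matching clk_hw registered on this provider's clock list.
 */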
static struct clk_hw *_ti_omap4_clkctrl_xlate(struct of_phandle_args *clkspec,
                                              void *data)
{
        struct omap_clkctrl_provider *provider = data;
        struct omap_clkctrl_clk *entry;
        bool found = false;

        if (clkspec->args_count != 2)
                return ERR_PTR(-EINVAL);

        pr_debug("%s: looking for %x:%x\n", __func__,
                 clkspec->args[0], clkspec->args[1]);

        list_for_each_entry(entry, &provider->clocks, node) {
                if (entry->reg_offset == clkspec->args[0] &&
                    entry->bit_offset == clkspec->args[1]) {
                        found = true;
                        break;
                }
        }

        if (!found)
                return ERR_PTR(-EINVAL);

        return entry->clk;
}

/* Get clkctrl clock base name based on clkctrl_name or dts node */
static const char * __init clkctrl_get_clock_name(struct device_node *np,
                                                  const char *clkctrl_name,
                                                  int offset, int index,
                                                  bool legacy_naming)
{
        char *clock_name;

        /* l4per-clkctrl:1234:0 style naming based on clkctrl_name */
        if (clkctrl_name && !legacy_naming) {
                clock_name = kasprintf(GFP_KERNEL, "%s-clkctrl:%04x:%d",
                                       clkctrl_name, offset, index);
                strreplace(clock_name, '_', '-');

                return clock_name;
        }

        /* l4per_cm:clk:1234:0 old style naming based on clkctrl_name */
        if (clkctrl_name)
                return kasprintf(GFP_KERNEL, "%s_cm:clk:%04x:%d",
                                 clkctrl_name, offset, index);

        /* l4per_cm:clk:1234:0 old style naming based on parent node name */
        if (legacy_naming)
                return kasprintf(GFP_KERNEL, "%pOFn:clk:%04x:%d",
                                 np->parent, offset, index);

        /* l4per-clkctrl:1234:0 style naming based on node name */
        return kasprintf(GFP_KERNEL, "%pOFn:%04x:%d", np, offset, index);
}
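
/*
 * Register one sub-clock (gate, mux or divider) under the provider and
 * add a lookup entry for it so it can be found via the DT specifier.
 */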
static int __init
_ti_clkctrl_clk_register(struct omap_clkctrl_provider *provider,
                         struct device_node *node, struct clk_hw *clk_hw,
                         u16 offset, u8 bit, const char * const *parents,
                         int num_parents, const struct clk_ops *ops,
                         const char *clkctrl_name)
{
        struct clk_init_data init = { NULL };
        struct clk *clk;
        struct omap_clkctrl_clk *clkctrl_clk;
        int ret = 0;

        init.name = clkctrl_get_clock_name(node, clkctrl_name, offset, bit,
                                           ti_clk_get_features()->flags &
                                           TI_CLK_CLKCTRL_COMPAT);

        clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
        if (!init.name || !clkctrl_clk) {
                ret = -ENOMEM;
                goto cleanup;
        }

        clk_hw->init = &init;
        init.parent_names = parents;
        init.num_parents = num_parents;
        init.ops = ops;
        init.flags = 0;

        clk = ti_clk_register(NULL, clk_hw, init.name);
        if (IS_ERR_OR_NULL(clk)) {
                ret = -EINVAL;
                goto cleanup;
        }

        clkctrl_clk->reg_offset = offset;
        clkctrl_clk->bit_offset = bit;
        clkctrl_clk->clk = clk_hw;

        list_add(&clkctrl_clk->node, &provider->clocks);

        return 0;

cleanup:
        kfree(init.name);
        kfree(clkctrl_clk);
        return ret;
}
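
/* Set up a single gate type sub-clock from the clkctrl bit data */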
static void __init
_ti_clkctrl_setup_gate(struct omap_clkctrl_provider *provider,
                       struct device_node *node, u16 offset,
                       const struct omap_clkctrl_bit_data *data,
                       void __iomem *reg, const char *clkctrl_name)
{
        struct clk_hw_omap *clk_hw;

        clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
        if (!clk_hw)
                return;

        clk_hw->enable_bit = data->bit;
        clk_hw->enable_reg.ptr = reg;

        if (_ti_clkctrl_clk_register(provider, node, &clk_hw->hw, offset,
                                     data->bit, data->parents, 1,
                                     &omap_gate_clk_ops, clkctrl_name))
                kfree(clk_hw);
}
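
/*
 * Set up a mux type sub-clock. The parent array is NULL terminated, and
 * the register mask is sized up to the nearest power-of-two that covers
 * the number of parents.
 */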
static void __init
_ti_clkctrl_setup_mux(struct omap_clkctrl_provider *provider,
                      struct device_node *node, u16 offset,
                      const struct omap_clkctrl_bit_data *data,
                      void __iomem *reg, const char *clkctrl_name)
{
        struct clk_omap_mux *mux;
        int num_parents = 0;
        const char * const *pname;

        mux = kzalloc(sizeof(*mux), GFP_KERNEL);
        if (!mux)
                return;

        pname = data->parents;
        while (*pname) {
                num_parents++;
                pname++;
        }

        mux->mask = num_parents;
        if (!(mux->flags & CLK_MUX_INDEX_ONE))
                mux->mask--;

        mux->mask = (1 << fls(mux->mask)) - 1;

        mux->shift = data->bit;
        mux->reg.ptr = reg;

        if (_ti_clkctrl_clk_register(provider, node, &mux->hw, offset,
                                     data->bit, data->parents, num_parents,
                                     &ti_clk_mux_ops, clkctrl_name))
                kfree(mux);
}
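
/* Set up a divider type sub-clock, including parsing its divider table */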
static void __init
_ti_clkctrl_setup_div(struct omap_clkctrl_provider *provider,
                      struct device_node *node, u16 offset,
                      const struct omap_clkctrl_bit_data *data,
                      void __iomem *reg, const char *clkctrl_name)
{
        struct clk_omap_divider *div;
        const struct omap_clkctrl_div_data *div_data = data->data;
        u8 div_flags = 0;

        div = kzalloc(sizeof(*div), GFP_KERNEL);
        if (!div)
                return;

        div->reg.ptr = reg;
        div->shift = data->bit;
        div->flags = div_data->flags;

        if (div->flags & CLK_DIVIDER_POWER_OF_TWO)
                div_flags |= CLKF_INDEX_POWER_OF_TWO;

        if (ti_clk_parse_divider_data((int *)div_data->dividers, 0,
                                      div_data->max_div, div_flags,
                                      div)) {
                pr_err("%s: Data parsing for %pOF:%04x:%d failed\n", __func__,
                       node, offset, data->bit);
                kfree(div);
                return;
        }

        if (_ti_clkctrl_clk_register(provider, node, &div->hw, offset,
                                     data->bit, data->parents, 1,
                                     &ti_clk_divider_ops, clkctrl_name))
                kfree(div);
}
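
/*
 * Walk the optional bit data of a clkctrl register and create the gate,
 * divider and mux sub-clocks described for it.
 */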
static void __init
_ti_clkctrl_setup_subclks(struct omap_clkctrl_provider *provider,
                          struct device_node *node,
                          const struct omap_clkctrl_reg_data *data,
                          void __iomem *reg, const char *clkctrl_name)
{
        const struct omap_clkctrl_bit_data *bits = data->bit_data;

        if (!bits)
                return;

        while (bits->bit) {
                switch (bits->type) {
                case TI_CLK_GATE:
                        _ti_clkctrl_setup_gate(provider, node, data->offset,
                                               bits, reg, clkctrl_name);
                        break;

                case TI_CLK_DIVIDER:
                        _ti_clkctrl_setup_div(provider, node, data->offset,
                                              bits, reg, clkctrl_name);
                        break;

                case TI_CLK_MUX:
                        _ti_clkctrl_setup_mux(provider, node, data->offset,
                                              bits, reg, clkctrl_name);
                        break;

                default:
                        pr_err("%s: bad subclk type: %d\n", __func__,
                               bits->type);
                        return;
                }
                bits++;
        }
}
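
/* Retry callback for deferred provider registration, see ti_clk_retry_init() */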
static void __init _clkctrl_add_provider(void *data,
                                         struct device_node *np)
{
        of_clk_add_hw_provider(np, _ti_omap4_clkctrl_xlate, data);
}

/* Get clock name based on compatible string for clkctrl */
static char * __init clkctrl_get_name(struct device_node *np)
{
        struct property *prop;
        const int prefix_len = 11;
        const char *compat;
        char *name;

        of_property_for_each_string(np, "compatible", prop, compat) {
                if (!strncmp("ti,clkctrl-", compat, prefix_len)) {
                        /* Two letter minimum name length for l3, l4 etc */
                        if (strnlen(compat + prefix_len, 16) < 2)
                                continue;
                        name = kasprintf(GFP_KERNEL, "%s", compat + prefix_len);
                        if (!name)
                                continue;
                        strreplace(name, '-', '_');

                        return name;
                }
        }

        return NULL;
}
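
/*
 * Main setup for a "ti,clkctrl" node: select the SoC specific clkctrl
 * data based on the compatible machine, find the entry matching the
 * node's register address, derive the clockdomain name, register the
 * main module clocks and their sub-clocks, and finally add the node as
 * a clock provider.
 */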
static void __init _ti_omap4_clkctrl_setup(struct device_node *node)
{
        struct omap_clkctrl_provider *provider;
        const struct omap_clkctrl_data *data = default_clkctrl_data;
        const struct omap_clkctrl_reg_data *reg_data;
        struct clk_init_data init = { NULL };
        struct clk_hw_omap *hw;
        struct clk *clk;
        struct omap_clkctrl_clk *clkctrl_clk = NULL;
        const __be32 *addrp;
        bool legacy_naming;
        char *clkctrl_name;
        u32 addr;
        int ret;
        char *c;
        u16 soc_mask = 0;

        if (!(ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT) &&
            of_node_name_eq(node, "clk"))
                ti_clk_features.flags |= TI_CLK_CLKCTRL_COMPAT;

        addrp = of_get_address(node, 0, NULL, NULL);
        addr = (u32)of_translate_address(node, addrp);

#ifdef CONFIG_ARCH_OMAP4
        if (of_machine_is_compatible("ti,omap4"))
                data = omap4_clkctrl_data;
#endif
#ifdef CONFIG_SOC_OMAP5
        if (of_machine_is_compatible("ti,omap5"))
                data = omap5_clkctrl_data;
#endif
#ifdef CONFIG_SOC_DRA7XX
        if (of_machine_is_compatible("ti,dra7")) {
                if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
                        data = dra7_clkctrl_compat_data;
                else
                        data = dra7_clkctrl_data;
        }

        if (of_machine_is_compatible("ti,dra72"))
                soc_mask = CLKF_SOC_DRA72;
        if (of_machine_is_compatible("ti,dra74"))
                soc_mask = CLKF_SOC_DRA74;
        if (of_machine_is_compatible("ti,dra76"))
                soc_mask = CLKF_SOC_DRA76;
#endif
#ifdef CONFIG_SOC_AM33XX
        if (of_machine_is_compatible("ti,am33xx")) {
                if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
                        data = am3_clkctrl_compat_data;
                else
                        data = am3_clkctrl_data;
        }
#endif
#ifdef CONFIG_SOC_AM43XX
        if (of_machine_is_compatible("ti,am4372")) {
                if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
                        data = am4_clkctrl_compat_data;
                else
                        data = am4_clkctrl_data;
        }

        if (of_machine_is_compatible("ti,am438x")) {
                if (ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT)
                        data = am438x_clkctrl_compat_data;
                else
                        data = am438x_clkctrl_data;
        }
#endif
#ifdef CONFIG_SOC_TI81XX
        if (of_machine_is_compatible("ti,dm814"))
                data = dm814_clkctrl_data;

        if (of_machine_is_compatible("ti,dm816"))
                data = dm816_clkctrl_data;
#endif

        if (ti_clk_get_features()->flags & TI_CLK_DEVICE_TYPE_GP)
                soc_mask |= CLKF_SOC_NONSEC;

        while (data->addr) {
                if (addr == data->addr)
                        break;

                data++;
        }

        if (!data->addr) {
                pr_err("%pOF not found from clkctrl data.\n", node);
                return;
        }

        provider = kzalloc(sizeof(*provider), GFP_KERNEL);
        if (!provider)
                return;

        provider->base = of_iomap(node, 0);

        legacy_naming = ti_clk_get_features()->flags & TI_CLK_CLKCTRL_COMPAT;
        clkctrl_name = clkctrl_get_name(node);
        if (clkctrl_name) {
                provider->clkdm_name = kasprintf(GFP_KERNEL,
                                                 "%s_clkdm", clkctrl_name);
                goto clkdm_found;
        }

        /*
         * The code below can be removed when all clkctrl nodes use domain
         * specific compatible property and standard clock node naming.
         */
        if (legacy_naming) {
                provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFnxxx", node->parent);
                if (!provider->clkdm_name) {
                        kfree(provider);
                        return;
                }

                /*
                 * Create default clkdm name, replace _cm from end of parent
                 * node name with _clkdm. The "xxx" padding above reserves
                 * room for the longer suffix.
                 */
                provider->clkdm_name[strlen(provider->clkdm_name) - 5] = 0;
        } else {
                provider->clkdm_name = kasprintf(GFP_KERNEL, "%pOFn", node);
                if (!provider->clkdm_name) {
                        kfree(provider);
                        return;
                }

                /*
                 * Create default clkdm name, replace _clkctrl from end of
                 * node name with _clkdm.
                 */
                provider->clkdm_name[strlen(provider->clkdm_name) - 7] = 0;
        }

        strcat(provider->clkdm_name, "clkdm");

        /* Replace any dashes in the clkdm name with underscores */
        c = provider->clkdm_name;

        while (*c) {
                if (*c == '-')
                        *c = '_';
                c++;
        }
clkdm_found:
        INIT_LIST_HEAD(&provider->clocks);

        /* Generate clocks */
        reg_data = data->regs;

        while (reg_data->parent) {
                if ((reg_data->flags & CLKF_SOC_MASK) &&
                    (reg_data->flags & soc_mask) == 0) {
                        reg_data++;
                        continue;
                }

                hw = kzalloc(sizeof(*hw), GFP_KERNEL);
                if (!hw)
                        return;

                hw->enable_reg.ptr = provider->base + reg_data->offset;

                _ti_clkctrl_setup_subclks(provider, node, reg_data,
                                          hw->enable_reg.ptr, clkctrl_name);

                if (reg_data->flags & CLKF_SW_SUP)
                        hw->enable_bit = MODULEMODE_SWCTRL;
                if (reg_data->flags & CLKF_HW_SUP)
                        hw->enable_bit = MODULEMODE_HWCTRL;
                if (reg_data->flags & CLKF_NO_IDLEST)
                        set_bit(NO_IDLEST, &hw->flags);

                if (reg_data->clkdm_name)
                        hw->clkdm_name = reg_data->clkdm_name;
                else
                        hw->clkdm_name = provider->clkdm_name;

                init.parent_names = &reg_data->parent;
                init.num_parents = 1;
                init.flags = 0;
                if (reg_data->flags & CLKF_SET_RATE_PARENT)
                        init.flags |= CLK_SET_RATE_PARENT;

                init.name = clkctrl_get_clock_name(node, clkctrl_name,
                                                   reg_data->offset, 0,
                                                   legacy_naming);
                if (!init.name)
                        goto cleanup;

                clkctrl_clk = kzalloc(sizeof(*clkctrl_clk), GFP_KERNEL);
                if (!clkctrl_clk)
                        goto cleanup;

                init.ops = &omap4_clkctrl_clk_ops;
                hw->hw.init = &init;

                clk = ti_clk_register_omap_hw(NULL, &hw->hw, init.name);
                if (IS_ERR_OR_NULL(clk))
                        goto cleanup;

                clkctrl_clk->reg_offset = reg_data->offset;
                clkctrl_clk->clk = &hw->hw;

                list_add(&clkctrl_clk->node, &provider->clocks);

                reg_data++;
        }

        ret = of_clk_add_hw_provider(node, _ti_omap4_clkctrl_xlate, provider);
        if (ret == -EPROBE_DEFER)
                ti_clk_retry_init(node, provider, _clkctrl_add_provider);

        kfree(clkctrl_name);

        return;

cleanup:
        kfree(hw);
        kfree(init.name);
        kfree(clkctrl_name);
        kfree(clkctrl_clk);
}
CLK_OF_DECLARE(ti_omap4_clkctrl_clock, "ti,clkctrl",
               _ti_omap4_clkctrl_setup);

/**
 * ti_clk_is_in_standby - Check if clkctrl clock is in standby or not
 * @clk: clock to check standby status for
 *
 * Finds whether the provided clock is in standby mode or not. Returns
 * true if the provided clock is a clkctrl type clock and it is in standby,
 * false otherwise.
 */
bool ti_clk_is_in_standby(struct clk *clk)
{
        struct clk_hw *hw;
        struct clk_hw_omap *hwclk;
        u32 val;

        hw = __clk_get_hw(clk);

        if (!omap2_clk_is_hw_omap(hw))
                return false;

        hwclk = to_clk_hw_omap(hw);

        val = ti_clk_ll_ops->clk_readl(&hwclk->enable_reg);

        if (val & OMAP4_STBYST_MASK)
                return true;

        return false;
}
EXPORT_SYMBOL_GPL(ti_clk_is_in_standby);