drivers/clocksource/ingenic-timer.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Ingenic SoCs TCU IRQ driver
 * Copyright (C) 2019 Paul Cercueil <paul@crapouillou.net>
 * Copyright (C) 2020 周琰杰 (Zhou Yanjie) <zhouyanjie@wanyeetech.com>
 */
#include <linux/bitops.h>
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/mfd/ingenic-tcu.h>
#include <linux/mfd/syscon.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/sched_clock.h>

#include <dt-bindings/clock/ingenic,tcu.h>
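/*
 * One call_single_data_t per CPU, used by the TCU interrupt handler to run
 * the clock_event_device handler on the CPU that owns the timer channel
 * (see ingenic_tcu_cevt_cb()).
 */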
static DEFINE_PER_CPU(call_single_data_t, ingenic_cevt_csd);
struct ingenic_soc_info {
	unsigned int num_channels;
};

struct ingenic_tcu_timer {
	unsigned int cpu;
	unsigned int channel;
	struct clock_event_device cevt;
	struct clk *clk;
	char name[8];
};

struct ingenic_tcu {
	struct regmap *map;
	struct device_node *np;
	struct clk *cs_clk;
	unsigned int cs_channel;
	struct clocksource cs;
	unsigned long pwm_channels_mask;
	struct ingenic_tcu_timer timers[];
};

static struct ingenic_tcu *ingenic_tcu;
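/*
 * Read the raw 16-bit count of the clocksource channel. Marked notrace
 * because it is also registered as the sched_clock read callback.
 */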
static u64 notrace ingenic_tcu_timer_read(void)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	unsigned int count;

	regmap_read(tcu->map, TCU_REG_TCNTc(tcu->cs_channel), &count);

	return count;
}

static u64 notrace ingenic_tcu_timer_cs_read(struct clocksource *cs)
{
	return ingenic_tcu_timer_read();
}
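/*
 * The per-CPU timers live in a flexible array at the end of struct
 * ingenic_tcu; container_of() on timers[timer->cpu] recovers the parent.
 */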
static inline struct ingenic_tcu *
to_ingenic_tcu(struct ingenic_tcu_timer *timer)
{
	return container_of(timer, struct ingenic_tcu, timers[timer->cpu]);
}

static inline struct ingenic_tcu_timer *
to_ingenic_tcu_timer(struct clock_event_device *evt)
{
	return container_of(evt, struct ingenic_tcu_timer, cevt);
}

static int ingenic_tcu_cevt_set_state_shutdown(struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	return 0;
}

static int ingenic_tcu_cevt_set_next(unsigned long next,
				     struct clock_event_device *evt)
{
	struct ingenic_tcu_timer *timer = to_ingenic_tcu_timer(evt);
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);

	if (next > 0xffff)
		return -EINVAL;

	regmap_write(tcu->map, TCU_REG_TDFRc(timer->channel), next);
	regmap_write(tcu->map, TCU_REG_TCNTc(timer->channel), 0);
	regmap_write(tcu->map, TCU_REG_TESR, BIT(timer->channel));

	return 0;
}

static void ingenic_per_cpu_event_handler(void *info)
{
	struct clock_event_device *cevt = (struct clock_event_device *) info;

	cevt->event_handler(cevt);
}
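/*
 * TCU channel interrupt: stop the one-shot channel, then bounce the
 * clockevent handler to the CPU that owns this timer via
 * smp_call_function_single_async() so it runs on the right core.
 */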
static irqreturn_t ingenic_tcu_cevt_cb(int irq, void *dev_id)
{
	struct ingenic_tcu_timer *timer = dev_id;
	struct ingenic_tcu *tcu = to_ingenic_tcu(timer);
	call_single_data_t *csd;

	regmap_write(tcu->map, TCU_REG_TECR, BIT(timer->channel));

	if (timer->cevt.event_handler) {
		csd = &per_cpu(ingenic_cevt_csd, timer->cpu);
		csd->info = (void *) &timer->cevt;
		csd->func = ingenic_per_cpu_event_handler;
		smp_call_function_single_async(timer->cpu, csd);
	}

	return IRQ_HANDLED;
}
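/*
 * Look up one of the TCU-provided clocks by channel index, using the TCU
 * device node itself as the clock provider.
 */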
static struct clk *ingenic_tcu_get_clock(struct device_node *np, int id)
{
	struct of_phandle_args args;

	args.np = np;
	args.args_count = 1;
	args.args[0] = id;

	return of_clk_get_from_provider(&args);
}
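/*
 * CPU hotplug "online" callback: enable the clock of the TCU channel
 * reserved for this CPU, map and request its interrupt, and register a
 * one-shot clock_event_device bound to this CPU.
 */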
static int ingenic_tcu_setup_cevt(unsigned int cpu)
{
	struct ingenic_tcu *tcu = ingenic_tcu;
	struct ingenic_tcu_timer *timer = &tcu->timers[cpu];
	unsigned int timer_virq;
	struct irq_domain *domain;
	unsigned long rate;
	int err;

	timer->clk = ingenic_tcu_get_clock(tcu->np, timer->channel);
	if (IS_ERR(timer->clk))
		return PTR_ERR(timer->clk);

	err = clk_prepare_enable(timer->clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(timer->clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	domain = irq_find_host(tcu->np);
	if (!domain) {
		err = -ENODEV;
		goto err_clk_disable;
	}

	timer_virq = irq_create_mapping(domain, timer->channel);
	if (!timer_virq) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	snprintf(timer->name, sizeof(timer->name), "TCU%u", timer->channel);

	err = request_irq(timer_virq, ingenic_tcu_cevt_cb, IRQF_TIMER,
			  timer->name, timer);
	if (err)
		goto err_irq_dispose_mapping;

	timer->cpu = smp_processor_id();
	timer->cevt.cpumask = cpumask_of(smp_processor_id());
	timer->cevt.features = CLOCK_EVT_FEAT_ONESHOT;
	timer->cevt.name = timer->name;
	timer->cevt.rating = 200;
	timer->cevt.set_state_shutdown = ingenic_tcu_cevt_set_state_shutdown;
	timer->cevt.set_next_event = ingenic_tcu_cevt_set_next;

	clockevents_config_and_register(&timer->cevt, rate, 10, 0xffff);

	return 0;

err_irq_dispose_mapping:
	irq_dispose_mapping(timer_virq);
err_clk_disable:
	clk_disable_unprepare(timer->clk);
err_clk_put:
	clk_put(timer->clk);
	return err;
}
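/*
 * Configure the channel reserved for the clocksource: reset it, let the
 * counter free-run from 0 to 0xffff, and register it as a 16-bit
 * continuous clocksource.
 */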
static int __init ingenic_tcu_clocksource_init(struct device_node *np,
					       struct ingenic_tcu *tcu)
{
	unsigned int channel = tcu->cs_channel;
	struct clocksource *cs = &tcu->cs;
	unsigned long rate;
	int err;

	tcu->cs_clk = ingenic_tcu_get_clock(np, channel);
	if (IS_ERR(tcu->cs_clk))
		return PTR_ERR(tcu->cs_clk);

	err = clk_prepare_enable(tcu->cs_clk);
	if (err)
		goto err_clk_put;

	rate = clk_get_rate(tcu->cs_clk);
	if (!rate) {
		err = -EINVAL;
		goto err_clk_disable;
	}

	/* Reset channel */
	regmap_update_bits(tcu->map, TCU_REG_TCSRc(channel),
			   0xffff & ~TCU_TCSR_RESERVED_BITS, 0);

	/* Reset counter */
	regmap_write(tcu->map, TCU_REG_TDFRc(channel), 0xffff);
	regmap_write(tcu->map, TCU_REG_TCNTc(channel), 0);

	/* Enable channel */
	regmap_write(tcu->map, TCU_REG_TESR, BIT(channel));

	cs->name = "ingenic-timer";
	cs->rating = 200;
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
	cs->mask = CLOCKSOURCE_MASK(16);
	cs->read = ingenic_tcu_timer_cs_read;

	err = clocksource_register_hz(cs, rate);
	if (err)
		goto err_clk_disable;

	return 0;

err_clk_disable:
	clk_disable_unprepare(tcu->cs_clk);
err_clk_put:
	clk_put(tcu->cs_clk);
	return err;
}
static const struct ingenic_soc_info jz4740_soc_info = {
	.num_channels = 8,
};

static const struct ingenic_soc_info jz4725b_soc_info = {
	.num_channels = 6,
};

static const struct of_device_id ingenic_tcu_of_match[] = {
	{ .compatible = "ingenic,jz4740-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,jz4725b-tcu", .data = &jz4725b_soc_info, },
	{ .compatible = "ingenic,jz4770-tcu", .data = &jz4740_soc_info, },
	{ .compatible = "ingenic,x1000-tcu", .data = &jz4740_soc_info, },
	{ /* sentinel */ }
};
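/*
 * Early timer init: reserve one TCU channel per possible CPU for clock
 * events plus one channel for the clocksource, leave the remaining channels
 * (pwm_channels_mask) to the PWM driver, then register the clocksource, the
 * per-CPU clock events and the sched_clock.
 */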
static int __init ingenic_tcu_init(struct device_node *np)
{
	const struct of_device_id *id = of_match_node(ingenic_tcu_of_match, np);
	const struct ingenic_soc_info *soc_info = id->data;
	struct ingenic_tcu_timer *timer;
	struct ingenic_tcu *tcu;
	struct regmap *map;
	unsigned int cpu;
	int ret, last_bit = -1;
	long rate;

	of_node_clear_flag(np, OF_POPULATED);

	map = device_node_to_regmap(np);
	if (IS_ERR(map))
		return PTR_ERR(map);

	tcu = kzalloc(struct_size(tcu, timers, num_possible_cpus()),
		      GFP_KERNEL);
	if (!tcu)
		return -ENOMEM;

	/*
	 * Enable all TCU channels for PWM use by default except channels 0/1,
	 * and channel 2 if target CPU is JZ4780/X2000 and SMP is selected.
	 */
	tcu->pwm_channels_mask = GENMASK(soc_info->num_channels - 1,
					 num_possible_cpus() + 1);
	of_property_read_u32(np, "ingenic,pwm-channels-mask",
			     (u32 *)&tcu->pwm_channels_mask);

	/* Verify that we have at least num_possible_cpus() + 1 free channels */
	if (hweight8(tcu->pwm_channels_mask) >
			soc_info->num_channels - num_possible_cpus() + 1) {
		pr_crit("%s: Invalid PWM channel mask: 0x%02lx\n", __func__,
			tcu->pwm_channels_mask);
		ret = -EINVAL;
		goto err_free_ingenic_tcu;
	}

	tcu->map = map;
	tcu->np = np;
	ingenic_tcu = tcu;

	for (cpu = 0; cpu < num_possible_cpus(); cpu++) {
		timer = &tcu->timers[cpu];

		timer->cpu = cpu;
		timer->channel = find_next_zero_bit(&tcu->pwm_channels_mask,
						    soc_info->num_channels,
						    last_bit + 1);
		last_bit = timer->channel;
	}

	tcu->cs_channel = find_next_zero_bit(&tcu->pwm_channels_mask,
					     soc_info->num_channels,
					     last_bit + 1);

	ret = ingenic_tcu_clocksource_init(np, tcu);
	if (ret) {
		pr_crit("%s: Unable to init clocksource: %d\n", __func__, ret);
		goto err_free_ingenic_tcu;
	}

	/* Setup clock events on each CPU core */
	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "Ingenic XBurst: online",
				ingenic_tcu_setup_cevt, NULL);
	if (ret < 0) {
		pr_crit("%s: Unable to start CPU timers: %d\n", __func__, ret);
		goto err_tcu_clocksource_cleanup;
	}

	/* Register the sched_clock at the end as there's no way to undo it */
	rate = clk_get_rate(tcu->cs_clk);
	sched_clock_register(ingenic_tcu_timer_read, 16, rate);

	return 0;

err_tcu_clocksource_cleanup:
	clocksource_unregister(&tcu->cs);
	clk_disable_unprepare(tcu->cs_clk);
	clk_put(tcu->cs_clk);
err_free_ingenic_tcu:
	kfree(tcu);
	return ret;
}
TIMER_OF_DECLARE(jz4740_tcu_intc, "ingenic,jz4740-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4725b_tcu_intc, "ingenic,jz4725b-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(jz4770_tcu_intc, "ingenic,jz4770-tcu", ingenic_tcu_init);
TIMER_OF_DECLARE(x1000_tcu_intc, "ingenic,x1000-tcu", ingenic_tcu_init);
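/*
 * The timers are registered early via TIMER_OF_DECLARE(); the platform
 * driver below binds drvdata and dev_pm_ops so the TCU clocks can be
 * gated/ungated across suspend/resume.
 */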
static int __init ingenic_tcu_probe(struct platform_device *pdev)
{
	platform_set_drvdata(pdev, ingenic_tcu);

	return 0;
}
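/*
 * Suspend/resume (noirq phase, see the pm_ops comment below): gate the
 * clocksource clock and each online CPU's timer clock. The clocks stay
 * prepared; only enable/disable is toggled here.
 */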
static int __maybe_unused ingenic_tcu_suspend(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;

	clk_disable(tcu->cs_clk);

	for (cpu = 0; cpu < num_online_cpus(); cpu++)
		clk_disable(tcu->timers[cpu].clk);

	return 0;
}
static int __maybe_unused ingenic_tcu_resume(struct device *dev)
{
	struct ingenic_tcu *tcu = dev_get_drvdata(dev);
	unsigned int cpu;
	int ret;

	for (cpu = 0; cpu < num_online_cpus(); cpu++) {
		ret = clk_enable(tcu->timers[cpu].clk);
		if (ret)
			goto err_timer_clk_disable;
	}

	ret = clk_enable(tcu->cs_clk);
	if (ret)
		goto err_timer_clk_disable;

	return 0;

err_timer_clk_disable:
	for (; cpu > 0; cpu--)
		clk_disable(tcu->timers[cpu - 1].clk);
	return ret;
}
static const struct dev_pm_ops __maybe_unused ingenic_tcu_pm_ops = {
	/* _noirq: We want the TCU clocks to be gated last / ungated first */
	.suspend_noirq = ingenic_tcu_suspend,
	.resume_noirq = ingenic_tcu_resume,
};

static struct platform_driver ingenic_tcu_driver = {
	.driver = {
		.name = "ingenic-tcu-timer",
#ifdef CONFIG_PM_SLEEP
		.pm = &ingenic_tcu_pm_ops,
#endif
		.of_match_table = ingenic_tcu_of_match,
	},
};
builtin_platform_driver_probe(ingenic_tcu_driver, ingenic_tcu_probe);