// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#ifdef CONFIG_SUPERH
#include <asm/platform_early.h>
#endif

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;
struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};
#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
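
/*
 * Register access helpers. Each channel is a 32-bit down-counter: TCNT
 * counts down and is reloaded from TCOR on underflow, and an underflow
 * raises an interrupt when TCR_UNIE is set. TCR is 16 bits wide while
 * TCOR and TCNT are 32 bits, hence the access-size switch below. TSTR is
 * a single 8-bit start/stop register shared by all channels, located at a
 * model-dependent offset from the block's base address.
 */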
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
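
/*
 * enable_count tracks nested enables of a channel, so that only the first
 * enable and the matching last disable actually touch the hardware, the
 * clock and the runtime PM state.
 */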
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
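
/*
 * Program the next expiry, 'delta' ticks from now. For a periodic timer
 * TCOR is set to the period so the counter reloads itself on every
 * underflow; for oneshot operation TCOR is parked at the maximum value so
 * a reload cannot fire a premature event before the timer is reprogrammed.
 */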
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
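
/*
 * TCNT counts down, but the clocksource core expects an increasing value:
 * XOR-ing the 32-bit counter with 0xffffffff inverts it into an up-count.
 */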
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		dev_pm_genpd_suspend(&ch->tmu->pdev->dev);
	}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		dev_pm_genpd_resume(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
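
/*
 * For periodic mode the reload value is one kernel tick worth of timer
 * ticks, rate / HZ; adding HZ/2 first rounds the quotient to the nearest
 * integer instead of truncating it.
 */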
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}
static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	dev_pm_genpd_suspend(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	dev_pm_genpd_resume(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
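
/*
 * The last two arguments to clockevents_config_and_register() below are
 * the minimum (0x300) and maximum (0xffffffff) programmable deltas, in
 * timer ticks.
 */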
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
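
/*
 * Per-channel register banks are 12 bytes apart. On SH3-style TMUs the
 * first bank starts at offset 4 from the block base, on other models at
 * offset 8 (the leading bytes hold the shared control registers,
 * including TSTR).
 */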
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0)
		return ch->irq;

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
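
/*
 * On SuperH the device may first be probed as an early platform device
 * ("earlytimer"), before the driver core is fully up, and then probed a
 * second time through the regular platform bus. Non-NULL drvdata on entry
 * means setup already ran during the early pass.
 */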
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_sh_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_sh_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

#ifdef CONFIG_SUPERH
sh_early_platform_init("earlytimer", &sh_tmu_device_driver);
#endif

subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");