// SPDX-License-Identifier: GPL-2.0
/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF			(1 << 8)
#define TCR_UNIE		(1 << 5)
#define TCR_TPSC_CLK4		(0 << 0)
#define TCR_TPSC_CLK16		(1 << 0)
#define TCR_TPSC_CLK64		(2 << 0)
#define TCR_TPSC_CLK256		(3 << 0)
#define TCR_TPSC_CLK1024	(4 << 0)
#define TCR_TPSC_MASK		(7 << 0)
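
/*
 * TSTR is a shared 8-bit start/stop register at a model-dependent offset
 * from the device base mapping; all other registers are per-channel,
 * located at reg_nr * 4 from the channel base. TCR is accessed as a
 * 16-bit register, TCOR and TCNT as 32-bit registers.
 */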
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
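
/*
 * Each channel owns one bit in the shared TSTR register: setting the bit
 * starts that channel's counter, clearing it stops the counter. The
 * read-modify-write below is serialized with tmu->lock so concurrent
 * channel starts/stops cannot clobber each other's bits.
 */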
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
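
/*
 * TCNT counts down and reloads from TCOR on underflow (standard SH TMU
 * behaviour). For a periodic timer TCOR is therefore programmed to the
 * period so every underflow rearms the same interval; for oneshot mode
 * TCOR is left at the maximum so a late reprogram cannot fire early.
 */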
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
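
/*
 * Underflow handler. Writing TCR without TCR_UNF set clears the pending
 * underflow flag; in oneshot mode TCR_UNIE is dropped as well, so the
 * channel stays quiet until the next set_next_event() reprograms it.
 */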
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
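
/*
 * The clocksource core expects an up-counter, while TCNT counts down
 * from 0xffffffff; XOR-ing with 0xffffffff converts between the two.
 */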
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
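
/*
 * The suspend/resume hooks below adjust enable_count directly and use the
 * __sh_tmu_* helpers instead of sh_tmu_enable()/sh_tmu_disable(),
 * presumably because runtime PM calls are not usable this late in the
 * suspend path; the power domain is handled explicitly instead.
 */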
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
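
/*
 * For periodic mode the interval is the timer rate rounded to the
 * nearest whole number of counter ticks per jiffy: (rate + HZ/2) / HZ.
 */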
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}

static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}

static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
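
/*
 * The clock_event_device is registered with a minimum delta of 0x300
 * counter ticks and the full 32-bit range as the maximum. The IRQ is
 * requested with IRQF_TIMER, which marks it as a timer interrupt exempt
 * from suspend and forced threading.
 */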
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
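
/*
 * Channel register blocks are 12 bytes apart. On SH3-style devices they
 * start 4 bytes into the mapping (TSTR sits at offset 2); on other
 * devices they start 8 bytes in (TSTR at offset 4).
 */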
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}
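
	/*
	 * The driver always programs the TCR_TPSC_CLK4 prescaler, so the
	 * effective counting rate is the "fck" input clock divided by 4.
	 */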
	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
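
/*
 * On SuperH this driver can be probed twice: first as an early platform
 * device (the "earlytimer" pass, before the driver core is up) and later
 * as a regular platform device. Non-NULL drvdata on the second pass means
 * the hardware is already set up and only runtime PM needs finishing.
 */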
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");