/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
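/*
 * Driver overview: the TMU is a block of up to three 32-bit down-counting
 * timer channels behind one shared start/stop register (TSTR). This driver
 * uses the first channel as a clock event device and the second channel as
 * a clock source; any remaining channels are left unused.
 */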
enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;
	unsigned long rate;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};
#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */

#define TCR_UNF		(1 << 8)
#define TCR_UNIE	(1 << 5)
#define TCR_TPSC_CLK4	(0 << 0)
#define TCR_TPSC_CLK16	(1 << 0)
#define TCR_TPSC_CLK64	(2 << 0)
#define TCR_TPSC_CLK256	(3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK	(7 << 0)
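/*
 * Register layout, as implied by the accessors below: TSTR is a shared 8-bit
 * start/stop register at a model-dependent offset from the device base,
 * while TCOR, TCNT and TCR are per-channel registers addressed at
 * ch->base + reg_nr * 4. TCR is accessed 16 bits wide, TCOR and TCNT 32
 * bits wide, and the TCR_TPSC_* values select the counter input clock as a
 * division of the module clock (/4 up to /1024).
 */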
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}
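/*
 * The TSTR update above is a read-modify-write of a register shared by all
 * channels, which is why it is serialized with a raw spinlock: it can be
 * reached from both the clock event and clock source paths, including from
 * hard interrupt context.
 */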
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
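/*
 * After __sh_tmu_enable() the channel free-runs: TCNT counts down from
 * 0xffffffff with the underflow interrupt masked, which is exactly the
 * state the clocksource read path expects.
 */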
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
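/*
 * enable_count reference-counts users of the channel so that nested
 * enable/disable pairs (including the suspend/resume callbacks below)
 * only touch the hardware and runtime PM on the 0 <-> 1 transitions.
 */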
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
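/*
 * On underflow the hardware reloads TCNT from TCOR, so programming TCOR
 * with the delta makes the period self-sustaining, while programming it
 * with 0xffffffff in oneshot mode pushes the automatic reload as far out
 * as possible until the event is reprogrammed.
 */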
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (clockevent_state_oneshot(&ch->ced))
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);

	return IRQ_HANDLED;
}
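/*
 * In oneshot mode the handler's TCR write also drops TCR_UNIE, masking
 * further underflow interrupts until the next event is programmed; in
 * periodic mode TCR_UNIE is kept set and the write merely acknowledges the
 * event by writing the underflow flag (TCR_UNF) back as zero.
 */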
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
static u64 sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
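/*
 * TCNT counts down, but the clocksource core expects a monotonically
 * increasing value, so the raw count is inverted with XOR 0xffffffff
 * (equivalent to 0xffffffff - TCNT for a 32-bit counter).
 */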
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret)
		ch->cs_enabled = true;

	return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
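/*
 * The suspend/resume pair bypasses sh_tmu_enable()/sh_tmu_disable() and
 * adjusts enable_count directly, so the refcount stays balanced while only
 * the hardware and the generic power domain (via the syscore helpers) are
 * touched on the system-wide suspend path.
 */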
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	clocksource_register_hz(cs, ch->tmu->rate);
	return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	sh_tmu_enable(ch);

	if (periodic) {
		ch->periodic = (ch->tmu->rate + HZ / 2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
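/*
 * The periodic interval is the counter rate divided by HZ, rounded to the
 * nearest integer: with e.g. an 8.33 MHz counter and HZ=100 this yields
 * 83333 counts per tick.
 */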
static int sh_tmu_clock_event_shutdown(struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);
	return 0;
}
static int sh_tmu_clock_event_set_state(struct clock_event_device *ced,
					int periodic)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	/* deal with old setting first */
	if (clockevent_state_oneshot(ced) || clockevent_state_periodic(ced))
		sh_tmu_disable(ch);

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for %s clock events\n",
		 ch->index, periodic ? "periodic" : "oneshot");
	sh_tmu_clock_event_start(ch, periodic);
	return 0;
}
static int sh_tmu_clock_event_set_oneshot(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 0);
}

static int sh_tmu_clock_event_set_periodic(struct clock_event_device *ced)
{
	return sh_tmu_clock_event_set_state(ced, 1);
}
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(!clockevent_state_oneshot(ced));

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpu_possible_mask;
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_state_shutdown = sh_tmu_clock_event_shutdown;
	ced->set_state_periodic = sh_tmu_clock_event_set_periodic;
	ced->set_state_oneshot = sh_tmu_clock_event_set_oneshot;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, ch->tmu->rate, 0x300, 0xffffffff);
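	/*
	 * 0x300 is the minimum programmable delta and 0xffffffff the maximum,
	 * matching the 32-bit width of TCNT; the clockevents core converts
	 * these counts into nanosecond limits using the rate passed above.
	 */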
	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	/* Each channel occupies 12 bytes of register space. */
	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}
static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}
	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Determine clock rate. */
	ret = clk_enable(tmu->clk);
	if (ret < 0)
		goto err_clk_unprepare;

	tmu->rate = clk_get_rate(tmu->clk) / 4;
	clk_disable(tmu->clk);
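	/*
	 * The rate computed above is the counter rate, not the module clock
	 * rate: every channel is programmed with TCR_TPSC_CLK4, so TCNT runs
	 * at the "fck" input divided by four.
	 */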
	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kcalloc(tmu->num_channels, sizeof(*tmu->channels),
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
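/*
 * The probe routine can run twice: once through the "earlytimer" early
 * platform path, before the regular driver model is up, and once through
 * normal platform driver registration. On the second pass the drvdata set
 * by sh_tmu_setup() is already present, so the device is kept as-is and
 * only its runtime PM state is finalized.
 */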
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");