/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

enum sh_tmu_model {
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};

struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	raw_spinlock_t lock; /* Protect the shared start/stop register */

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};

#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT 1 /* channel register */
#define TCR 2 /* channel register */

#define TCR_UNF (1 << 8)
#define TCR_UNIE (1 << 5)
#define TCR_TPSC_CLK4 (0 << 0)
#define TCR_TPSC_CLK16 (1 << 0)
#define TCR_TPSC_CLK64 (2 << 0)
#define TCR_TPSC_CLK256 (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK (7 << 0)
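
/*
 * TCR layout as used here: bit 8 is the underflow flag (UNF), bit 5 the
 * underflow interrupt enable (UNIE), and bits 2:0 the prescaler select
 * (TPSC). The driver always runs the counter at peripheral clock / 4; as
 * an illustration (the clock rate is hypothetical), a 66 MHz "fck" with
 * TCR_TPSC_CLK4 yields a 16.5 MHz count rate, i.e. roughly 260 s per full
 * 32-bit wrap.
 */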

static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}

static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
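
/*
 * TSTR holds one start bit per channel and is shared by every channel in
 * the TMU block, so the helper below performs its read-modify-write under
 * the per-device raw spinlock.
 */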

static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&ch->tmu->lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&ch->tmu->lock, flags);
}

static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
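
/*
 * The channel is refcounted through enable_count: only the first enable
 * touches the hardware and runtime PM, and only the last disable shuts it
 * off again. The clocksource suspend/resume hooks adjust the same counter.
 */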

static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}

static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}

static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
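
/*
 * The TMU counts TCNT down and reloads it from TCOR on underflow. For a
 * periodic timer the delta is therefore kept in TCOR, while the oneshot
 * case parks TCOR at the maximum value and relies on the interrupt handler
 * to turn the interrupt off after one expiry.
 */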

static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
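
/*
 * Underflow interrupt: in oneshot mode the TCR write below drops UNIE so
 * the channel fires only once; in periodic mode UNIE stays set and the
 * write merely clears the UNF status flag.
 */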

static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}

static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
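
/*
 * TCNT counts down, but the clocksource core expects an increasing value,
 * so the raw counter is inverted by XOR-ing it with 0xffffffff.
 */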

static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}

static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}

static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}

static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}

static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}

static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}

static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
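
/*
 * For periodic events the reload value is ticks-per-jiffy: with the
 * counter running at ch->rate Hz, (ch->rate + HZ/2) / HZ below rounds to
 * the nearest whole number of ticks per HZ interval.
 */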

static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ/2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}

static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}

static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}

static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	/* dummy 1 Hz rate; reconfigured in sh_tmu_clock_event_start() */
	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}

static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}

static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;
	ch->index = index;

	if (tmu->model == SH_TMU_SH3)
		ch->base = tmu->mapbase + 4 + ch->index * 12;
	else
		ch->base = tmu->mapbase + 8 + ch->index * 12;

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}

static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	return 0;
}

static int sh_tmu_parse_dt(struct sh_tmu_device *tmu)
{
	struct device_node *np = tmu->pdev->dev.of_node;

	tmu->model = SH_TMU;
	tmu->num_channels = 3;

	of_property_read_u32(np, "#renesas,channels", &tmu->num_channels);

	if (tmu->num_channels != 2 && tmu->num_channels != 3) {
		dev_err(&tmu->pdev->dev, "invalid number of channels %u\n",
			tmu->num_channels);
		return -EINVAL;
	}

	return 0;
}

static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	unsigned int i;
	int ret;

	tmu->pdev = pdev;

	raw_spin_lock_init(&tmu->lock);

	if (IS_ENABLED(CONFIG_OF) && pdev->dev.of_node) {
		ret = sh_tmu_parse_dt(tmu);
		if (ret < 0)
			return ret;
	} else if (pdev->dev.platform_data) {
		const struct platform_device_id *id = pdev->id_entry;
		struct sh_timer_config *cfg = pdev->dev.platform_data;

		tmu->model = id->driver_data;
		tmu->num_channels = hweight8(cfg->channels_mask);
	} else {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev, "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	/*
	 * Use the first channel as a clock event device and the second channel
	 * as a clock source.
	 */
	for (i = 0; i < tmu->num_channels; ++i) {
		ret = sh_tmu_channel_setup(&tmu->channels[i], i,
					   i == 0, i == 1, tmu);
		if (ret < 0)
			goto err_unmap;
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	iounmap(tmu->mapbase);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}

static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}

static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}

static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
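
/*
 * On non-DT platforms the entries above select the TMU model by device
 * name, and the channel set comes from platform data. A minimal sketch,
 * with a hypothetical mask value:
 *
 *	static struct sh_timer_config tmu0_platform_data = {
 *		.channels_mask = 7, // hweight8(7) == 3 channels
 *	};
 */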

static const struct of_device_id sh_tmu_of_table[] __maybe_unused = {
	{ .compatible = "renesas,tmu" },
	{ }
};
MODULE_DEVICE_TABLE(of, sh_tmu_of_table);

static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
		.of_match_table = of_match_ptr(sh_tmu_of_table),
	},
	.id_table	= sh_tmu_id_table,
};

static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}

early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);

MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");