/*
 * SuperH Timer Support - TMU
 *
 * Copyright (C) 2009 Magnus Damm
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/sh_timer.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
enum sh_tmu_model {
	SH_TMU_LEGACY,
	SH_TMU,
	SH_TMU_SH3,
};

struct sh_tmu_device;

struct sh_tmu_channel {
	struct sh_tmu_device *tmu;
	unsigned int index;

	void __iomem *base;
	int irq;

	unsigned long rate;
	unsigned long periodic;
	struct clock_event_device ced;
	struct clocksource cs;
	bool cs_enabled;
	unsigned int enable_count;
};
struct sh_tmu_device {
	struct platform_device *pdev;

	void __iomem *mapbase;
	struct clk *clk;

	enum sh_tmu_model model;

	struct sh_tmu_channel *channels;
	unsigned int num_channels;

	bool has_clockevent;
	bool has_clocksource;
};
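/* TSTR is shared by all channels of a TMU unit; serialize read-modify-write. */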
static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
#define TSTR -1 /* shared register */
#define TCOR  0 /* channel register */
#define TCNT  1 /* channel register */
#define TCR   2 /* channel register */
#define TCR_UNF          (1 << 8)
#define TCR_UNIE         (1 << 5)
#define TCR_TPSC_CLK4    (0 << 0)
#define TCR_TPSC_CLK16   (1 << 0)
#define TCR_TPSC_CLK64   (2 << 0)
#define TCR_TPSC_CLK256  (3 << 0)
#define TCR_TPSC_CLK1024 (4 << 0)
#define TCR_TPSC_MASK    (7 << 0)
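/*
 * Each channel maps TCOR, TCNT and TCR at consecutive 32-bit offsets from the
 * channel base, while TSTR is a single 8-bit start/stop register shared by
 * all channels; its offset from the device base depends on the TMU model.
 */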
static inline unsigned long sh_tmu_read(struct sh_tmu_channel *ch, int reg_nr)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return ioread8(ch->tmu->mapbase);
		case SH_TMU_SH3:
			return ioread8(ch->tmu->mapbase + 2);
		case SH_TMU:
			return ioread8(ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		return ioread16(ch->base + offs);
	else
		return ioread32(ch->base + offs);
}
static inline void sh_tmu_write(struct sh_tmu_channel *ch, int reg_nr,
				unsigned long value)
{
	unsigned long offs;

	if (reg_nr == TSTR) {
		switch (ch->tmu->model) {
		case SH_TMU_LEGACY:
			return iowrite8(value, ch->tmu->mapbase);
		case SH_TMU_SH3:
			return iowrite8(value, ch->tmu->mapbase + 2);
		case SH_TMU:
			return iowrite8(value, ch->tmu->mapbase + 4);
		}
	}

	offs = reg_nr << 2;

	if (reg_nr == TCR)
		iowrite16(value, ch->base + offs);
	else
		iowrite32(value, ch->base + offs);
}
static void sh_tmu_start_stop_ch(struct sh_tmu_channel *ch, int start)
{
	unsigned long flags, value;

	/* start stop register shared by multiple timer channels */
	raw_spin_lock_irqsave(&sh_tmu_lock, flags);
	value = sh_tmu_read(ch, TSTR);

	if (start)
		value |= 1 << ch->index;
	else
		value &= ~(1 << ch->index);

	sh_tmu_write(ch, TSTR, value);
	raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
}
static int __sh_tmu_enable(struct sh_tmu_channel *ch)
{
	int ret;

	/* enable clock */
	ret = clk_enable(ch->tmu->clk);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: cannot enable clock\n",
			ch->index);
		return ret;
	}

	/* make sure channel is disabled */
	sh_tmu_start_stop_ch(ch, 0);

	/* maximum timeout */
	sh_tmu_write(ch, TCOR, 0xffffffff);
	sh_tmu_write(ch, TCNT, 0xffffffff);

	/* configure channel to parent clock / 4, irq off */
	ch->rate = clk_get_rate(ch->tmu->clk) / 4;
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* enable channel */
	sh_tmu_start_stop_ch(ch, 1);

	return 0;
}
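/*
 * sh_tmu_enable()/sh_tmu_disable() refcount users of the channel so that the
 * clock and runtime PM state are only touched on the first enable and the
 * last disable.
 */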
static int sh_tmu_enable(struct sh_tmu_channel *ch)
{
	if (ch->enable_count++ > 0)
		return 0;

	pm_runtime_get_sync(&ch->tmu->pdev->dev);
	dev_pm_syscore_device(&ch->tmu->pdev->dev, true);

	return __sh_tmu_enable(ch);
}
static void __sh_tmu_disable(struct sh_tmu_channel *ch)
{
	/* disable channel */
	sh_tmu_start_stop_ch(ch, 0);

	/* disable interrupts in TMU block */
	sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);

	/* stop clock */
	clk_disable(ch->tmu->clk);
}
static void sh_tmu_disable(struct sh_tmu_channel *ch)
{
	if (WARN_ON(ch->enable_count == 0))
		return;

	if (--ch->enable_count > 0)
		return;

	__sh_tmu_disable(ch);

	dev_pm_syscore_device(&ch->tmu->pdev->dev, false);
	pm_runtime_put(&ch->tmu->pdev->dev);
}
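/*
 * TCNT counts down from the programmed value and raises the underflow
 * interrupt when it wraps; for periodic operation TCOR holds the value that
 * is automatically reloaded into TCNT on underflow.
 */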
static void sh_tmu_set_next(struct sh_tmu_channel *ch, unsigned long delta,
			    int periodic)
{
	/* stop timer */
	sh_tmu_start_stop_ch(ch, 0);

	/* acknowledge interrupt */
	sh_tmu_read(ch, TCR);

	/* enable interrupt */
	sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* reload delta value in case of periodic timer */
	if (periodic)
		sh_tmu_write(ch, TCOR, delta);
	else
		sh_tmu_write(ch, TCOR, 0xffffffff);

	sh_tmu_write(ch, TCNT, delta);

	/* start timer */
	sh_tmu_start_stop_ch(ch, 1);
}
static irqreturn_t sh_tmu_interrupt(int irq, void *dev_id)
{
	struct sh_tmu_channel *ch = dev_id;

	/* disable or acknowledge interrupt */
	if (ch->ced.mode == CLOCK_EVT_MODE_ONESHOT)
		sh_tmu_write(ch, TCR, TCR_TPSC_CLK4);
	else
		sh_tmu_write(ch, TCR, TCR_UNIE | TCR_TPSC_CLK4);

	/* notify clockevent layer */
	ch->ced.event_handler(&ch->ced);
	return IRQ_HANDLED;
}
static struct sh_tmu_channel *cs_to_sh_tmu(struct clocksource *cs)
{
	return container_of(cs, struct sh_tmu_channel, cs);
}
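/*
 * TCNT is a downcounter, so the raw value is inverted to present the
 * monotonically increasing count that the clocksource core expects.
 */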
static cycle_t sh_tmu_clocksource_read(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	return sh_tmu_read(ch, TCNT) ^ 0xffffffff;
}
static int sh_tmu_clocksource_enable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);
	int ret;

	if (WARN_ON(ch->cs_enabled))
		return 0;

	ret = sh_tmu_enable(ch);
	if (!ret) {
		__clocksource_updatefreq_hz(cs, ch->rate);
		ch->cs_enabled = true;
	}

	return ret;
}
static void sh_tmu_clocksource_disable(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (WARN_ON(!ch->cs_enabled))
		return;

	sh_tmu_disable(ch);
	ch->cs_enabled = false;
}
static void sh_tmu_clocksource_suspend(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (--ch->enable_count == 0) {
		__sh_tmu_disable(ch);
		pm_genpd_syscore_poweroff(&ch->tmu->pdev->dev);
	}
}
static void sh_tmu_clocksource_resume(struct clocksource *cs)
{
	struct sh_tmu_channel *ch = cs_to_sh_tmu(cs);

	if (!ch->cs_enabled)
		return;

	if (ch->enable_count++ == 0) {
		pm_genpd_syscore_poweron(&ch->tmu->pdev->dev);
		__sh_tmu_enable(ch);
	}
}
static int sh_tmu_register_clocksource(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clocksource *cs = &ch->cs;

	cs->name = name;
	cs->rating = 200;
	cs->read = sh_tmu_clocksource_read;
	cs->enable = sh_tmu_clocksource_enable;
	cs->disable = sh_tmu_clocksource_disable;
	cs->suspend = sh_tmu_clocksource_suspend;
	cs->resume = sh_tmu_clocksource_resume;
	cs->mask = CLOCKSOURCE_MASK(32);
	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used as clock source\n",
		 ch->index);

	/* Register with dummy 1 Hz value, gets updated in ->enable() */
	clocksource_register_hz(cs, 1);
	return 0;
}
static struct sh_tmu_channel *ced_to_sh_tmu(struct clock_event_device *ced)
{
	return container_of(ced, struct sh_tmu_channel, ced);
}
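/*
 * The periodic tick period is rate / HZ counter cycles; adding HZ / 2 before
 * the division rounds to the nearest integer.
 */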
static void sh_tmu_clock_event_start(struct sh_tmu_channel *ch, int periodic)
{
	struct clock_event_device *ced = &ch->ced;

	sh_tmu_enable(ch);

	clockevents_config(ced, ch->rate);

	if (periodic) {
		ch->periodic = (ch->rate + HZ / 2) / HZ;
		sh_tmu_set_next(ch, ch->periodic, 1);
	}
}
static void sh_tmu_clock_event_mode(enum clock_event_mode mode,
				    struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);
	int disabled = 0;

	/* deal with old setting first */
	switch (ced->mode) {
	case CLOCK_EVT_MODE_PERIODIC:
	case CLOCK_EVT_MODE_ONESHOT:
		sh_tmu_disable(ch);
		disabled = 1;
		break;
	default:
		break;
	}

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for periodic clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 1);
		break;
	case CLOCK_EVT_MODE_ONESHOT:
		dev_info(&ch->tmu->pdev->dev,
			 "ch%u: used for oneshot clock events\n", ch->index);
		sh_tmu_clock_event_start(ch, 0);
		break;
	case CLOCK_EVT_MODE_UNUSED:
		if (!disabled)
			sh_tmu_disable(ch);
		break;
	case CLOCK_EVT_MODE_SHUTDOWN:
	default:
		break;
	}
}
static int sh_tmu_clock_event_next(unsigned long delta,
				   struct clock_event_device *ced)
{
	struct sh_tmu_channel *ch = ced_to_sh_tmu(ced);

	BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT);

	/* program new delta value */
	sh_tmu_set_next(ch, delta, 0);
	return 0;
}
static void sh_tmu_clock_event_suspend(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweroff(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}

static void sh_tmu_clock_event_resume(struct clock_event_device *ced)
{
	pm_genpd_syscore_poweron(&ced_to_sh_tmu(ced)->tmu->pdev->dev);
}
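/*
 * The clock event device is registered with a minimum delta of 0x300 counter
 * cycles and a maximum of 0xffffffff, the full range of the 32-bit TCNT
 * register.
 */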
static void sh_tmu_register_clockevent(struct sh_tmu_channel *ch,
				       const char *name)
{
	struct clock_event_device *ced = &ch->ced;
	int ret;

	ced->name = name;
	ced->features = CLOCK_EVT_FEAT_PERIODIC;
	ced->features |= CLOCK_EVT_FEAT_ONESHOT;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->set_next_event = sh_tmu_clock_event_next;
	ced->set_mode = sh_tmu_clock_event_mode;
	ced->suspend = sh_tmu_clock_event_suspend;
	ced->resume = sh_tmu_clock_event_resume;

	dev_info(&ch->tmu->pdev->dev, "ch%u: used for clock events\n",
		 ch->index);

	clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);

	ret = request_irq(ch->irq, sh_tmu_interrupt,
			  IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
			  dev_name(&ch->tmu->pdev->dev), ch);
	if (ret) {
		dev_err(&ch->tmu->pdev->dev, "ch%u: failed to request irq %d\n",
			ch->index, ch->irq);
		return;
	}
}
static int sh_tmu_register(struct sh_tmu_channel *ch, const char *name,
			   bool clockevent, bool clocksource)
{
	if (clockevent) {
		ch->tmu->has_clockevent = true;
		sh_tmu_register_clockevent(ch, name);
	} else if (clocksource) {
		ch->tmu->has_clocksource = true;
		sh_tmu_register_clocksource(ch, name);
	}

	return 0;
}
static int sh_tmu_channel_setup(struct sh_tmu_channel *ch, unsigned int index,
				bool clockevent, bool clocksource,
				struct sh_tmu_device *tmu)
{
	/* Skip unused channels. */
	if (!clockevent && !clocksource)
		return 0;

	ch->tmu = tmu;

	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;

		/*
		 * The SH3 variant (SH770x, SH7705, SH7710 and SH7720) maps
		 * channel registers blocks at base + 2 + 12 * index, while all
		 * other variants map them at base + 4 + 12 * index. We can
		 * compute the index by just dividing by 12, the 2 bytes or 4
		 * bytes offset being hidden by the integer division.
		 */
		ch->index = cfg->channel_offset / 12;
		ch->base = tmu->mapbase + cfg->channel_offset;
	} else {
		ch->index = index;

		if (tmu->model == SH_TMU_SH3)
			ch->base = tmu->mapbase + 4 + ch->index * 12;
		else
			ch->base = tmu->mapbase + 8 + ch->index * 12;
	}

	ch->irq = platform_get_irq(tmu->pdev, index);
	if (ch->irq < 0) {
		dev_err(&tmu->pdev->dev, "ch%u: failed to get irq\n",
			ch->index);
		return ch->irq;
	}

	ch->cs_enabled = false;
	ch->enable_count = 0;

	return sh_tmu_register(ch, dev_name(&tmu->pdev->dev),
			       clockevent, clocksource);
}
static int sh_tmu_map_memory(struct sh_tmu_device *tmu)
{
	struct resource *res;

	res = platform_get_resource(tmu->pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&tmu->pdev->dev, "failed to get I/O memory\n");
		return -ENXIO;
	}

	tmu->mapbase = ioremap_nocache(res->start, resource_size(res));
	if (tmu->mapbase == NULL)
		return -ENXIO;

	/*
	 * In legacy platform device configuration (with one device per channel)
	 * the resource points to the channel base address.
	 */
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase -= cfg->channel_offset;
	}

	return 0;
}
static void sh_tmu_unmap_memory(struct sh_tmu_device *tmu)
{
	if (tmu->model == SH_TMU_LEGACY) {
		struct sh_timer_config *cfg = tmu->pdev->dev.platform_data;
		tmu->mapbase += cfg->channel_offset;
	}

	iounmap(tmu->mapbase);
}
static int sh_tmu_setup(struct sh_tmu_device *tmu, struct platform_device *pdev)
{
	struct sh_timer_config *cfg = pdev->dev.platform_data;
	const struct platform_device_id *id = pdev->id_entry;
	unsigned int i;
	int ret;

	if (!cfg) {
		dev_err(&tmu->pdev->dev, "missing platform data\n");
		return -ENXIO;
	}

	tmu->pdev = pdev;
	tmu->model = id->driver_data;

	/* Get hold of clock. */
	tmu->clk = clk_get(&tmu->pdev->dev,
			   tmu->model == SH_TMU_LEGACY ? "tmu_fck" : "fck");
	if (IS_ERR(tmu->clk)) {
		dev_err(&tmu->pdev->dev, "cannot get clock\n");
		return PTR_ERR(tmu->clk);
	}

	ret = clk_prepare(tmu->clk);
	if (ret < 0)
		goto err_clk_put;

	/* Map the memory resource. */
	ret = sh_tmu_map_memory(tmu);
	if (ret < 0) {
		dev_err(&tmu->pdev->dev, "failed to remap I/O memory\n");
		goto err_clk_unprepare;
	}

	/* Allocate and setup the channels. */
	if (tmu->model == SH_TMU_LEGACY)
		tmu->num_channels = 1;
	else
		tmu->num_channels = hweight8(cfg->channels_mask);

	tmu->channels = kzalloc(sizeof(*tmu->channels) * tmu->num_channels,
				GFP_KERNEL);
	if (tmu->channels == NULL) {
		ret = -ENOMEM;
		goto err_unmap;
	}

	if (tmu->model == SH_TMU_LEGACY) {
		ret = sh_tmu_channel_setup(&tmu->channels[0], 0,
					   cfg->clockevent_rating != 0,
					   cfg->clocksource_rating != 0, tmu);
		if (ret < 0)
			goto err_unmap;
	} else {
		/*
		 * Use the first channel as a clock event device and the second
		 * channel as a clock source.
		 */
		for (i = 0; i < tmu->num_channels; ++i) {
			ret = sh_tmu_channel_setup(&tmu->channels[i], i,
						   i == 0, i == 1, tmu);
			if (ret < 0)
				goto err_unmap;
		}
	}

	platform_set_drvdata(pdev, tmu);

	return 0;

err_unmap:
	kfree(tmu->channels);
	sh_tmu_unmap_memory(tmu);
err_clk_unprepare:
	clk_unprepare(tmu->clk);
err_clk_put:
	clk_put(tmu->clk);
	return ret;
}
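/*
 * The driver can be probed twice: once as an early platform device to provide
 * the system timer before the driver core is up, and again as a regular
 * platform device. The second pass finds the device already set up through
 * platform_get_drvdata() and only updates the runtime PM state.
 */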
static int sh_tmu_probe(struct platform_device *pdev)
{
	struct sh_tmu_device *tmu = platform_get_drvdata(pdev);
	int ret;

	if (!is_early_platform_device(pdev)) {
		pm_runtime_set_active(&pdev->dev);
		pm_runtime_enable(&pdev->dev);
	}

	if (tmu) {
		dev_info(&pdev->dev, "kept as earlytimer\n");
		goto out;
	}

	tmu = kzalloc(sizeof(*tmu), GFP_KERNEL);
	if (tmu == NULL)
		return -ENOMEM;

	ret = sh_tmu_setup(tmu, pdev);
	if (ret) {
		kfree(tmu);
		pm_runtime_idle(&pdev->dev);
		return ret;
	}

	if (is_early_platform_device(pdev))
		return 0;

 out:
	if (tmu->has_clockevent || tmu->has_clocksource)
		pm_runtime_irq_safe(&pdev->dev);
	else
		pm_runtime_idle(&pdev->dev);

	return 0;
}
static int sh_tmu_remove(struct platform_device *pdev)
{
	return -EBUSY; /* cannot unregister clockevent and clocksource */
}
static const struct platform_device_id sh_tmu_id_table[] = {
	{ "sh_tmu", SH_TMU_LEGACY },
	{ "sh-tmu", SH_TMU },
	{ "sh-tmu-sh3", SH_TMU_SH3 },
	{ }
};
MODULE_DEVICE_TABLE(platform, sh_tmu_id_table);
static struct platform_driver sh_tmu_device_driver = {
	.probe		= sh_tmu_probe,
	.remove		= sh_tmu_remove,
	.driver		= {
		.name	= "sh_tmu",
	},
	.id_table	= sh_tmu_id_table,
};
static int __init sh_tmu_init(void)
{
	return platform_driver_register(&sh_tmu_device_driver);
}

static void __exit sh_tmu_exit(void)
{
	platform_driver_unregister(&sh_tmu_device_driver);
}
early_platform_init("earlytimer", &sh_tmu_device_driver);
subsys_initcall(sh_tmu_init);
module_exit(sh_tmu_exit);
MODULE_AUTHOR("Magnus Damm");
MODULE_DESCRIPTION("SuperH TMU Timer Driver");
MODULE_LICENSE("GPL v2");