/*
 * arch/sh/kernel/timers/timer-tmu.c - TMU Timer Support
 *
 * Copyright (C) 2005 - 2007  Paul Mundt
 *
 * TMU handling code hacked out of arch/sh/kernel/time.c
 *
 *  Copyright (C) 1999  Tetsuya Okada & Niibe Yutaka
 *  Copyright (C) 2000  Philipp Rumpf <prumpf@tux.org>
 *  Copyright (C) 2002, 2003, 2004  Paul Mundt
 *  Copyright (C) 2002  M. R. Brown  <mrbrown@linux-sh.org>
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
17 #include <linux/init.h>
18 #include <linux/kernel.h>
19 #include <linux/interrupt.h>
20 #include <linux/seqlock.h>
21 #include <linux/clockchips.h>
22 #include <asm/timer.h>
26 #include <asm/clock.h>
28 #define TMU_TOCR_INIT 0x00
29 #define TMU_TCR_INIT 0x0020
34 static inline void _tmu_start(int tmu_num
)
36 ctrl_outb(ctrl_inb(TMU_012_TSTR
) | (0x1<<tmu_num
), TMU_012_TSTR
);
39 static inline void _tmu_set_irq(int tmu_num
, int enabled
)
41 register unsigned long tmu_tcr
= TMU0_TCR
+ (0xc*tmu_num
);
42 ctrl_outw( (enabled
? ctrl_inw(tmu_tcr
) | (1<<5) : ctrl_inw(tmu_tcr
) & ~(1<<5)), tmu_tcr
);
45 static inline void _tmu_stop(int tmu_num
)
47 ctrl_outb(ctrl_inb(TMU_012_TSTR
) & ~(0x1<<tmu_num
), TMU_012_TSTR
);
50 static inline void _tmu_clear_status(int tmu_num
)
52 register unsigned long tmu_tcr
= TMU0_TCR
+ (0xc*tmu_num
);
54 ctrl_outw(ctrl_inw(tmu_tcr
) & ~0x100, tmu_tcr
);
57 static inline unsigned long _tmu_read(int tmu_num
)
59 return ctrl_inl(TMU0_TCNT
+0xC*tmu_num
);
/*
 * sys_timer "start" hook (wired into tmu_timer_ops below).
 * NOTE(review): the function body did not survive extraction — only the
 * signature is visible in this chunk; recover the body from the full file.
 */
static int tmu_timer_start(void)
/*
 * sys_timer "stop" hook: clears TMU0's pending status.
 * NOTE(review): lines are missing from this extraction (braces, any
 * channel-stop calls, and the return statement are not visible) —
 * recover them from the full file.
 */
static int tmu_timer_stop(void)
	_tmu_clear_status(TMU0);
/*
 * Non-zero when the module clock feeding the TMUs has been scaled down
 * (set in tmu_clk_recalc() when the new rate is below the old one);
 * since TMU0 and TMU1 share module_clk, a scaled module_clk makes TMU1
 * show the same frequency.  Used as a left-shift count in
 * tmu_timer_read() to compensate the clocksource value.
 */
static int tmus_are_scaled;
84 static cycle_t
tmu_timer_read(struct clocksource
*cs
)
86 return ((cycle_t
)(~_tmu_read(TMU1
)))<<tmus_are_scaled
;
/* Last interval programmed into each channel (indexed by TMU number). */
static unsigned long tmu_latest_interval[3];

/*
 * Program a channel's down-counter (TCNT) with @interval and remember
 * it in tmu_latest_interval[].  When @reload is non-zero, TCOR gets the
 * same value so TCNT auto-reloads on underflow; otherwise TCOR is
 * cleared.
 * NOTE(review): some lines of this function (braces and possibly
 * channel stop/start calls around the register writes) did not survive
 * extraction — recover them from the full file.
 */
static void tmu_timer_set_interval(int tmu_num, unsigned long interval, unsigned int reload)
	unsigned long tmu_tcnt = TMU0_TCNT + tmu_num*0xC;
	unsigned long tmu_tcor = TMU0_TCOR + tmu_num*0xC;

	ctrl_outl(interval, tmu_tcnt);
	tmu_latest_interval[tmu_num] = interval;

	/*
	 * TCNT reloads from TCOR on underflow, clear it if we don't
	 * intend to auto-reload
	 */
	ctrl_outl( reload ? interval : 0 , tmu_tcor);
/*
 * clockevents set_next_event hook: arm TMU0 to expire after @cycles,
 * auto-reloading only when the device is in periodic mode, then unmask
 * TMU0's interrupt.
 * NOTE(review): the braces and the return statement did not survive
 * extraction.
 */
static int tmu_set_next_event(unsigned long cycles,
			      struct clock_event_device *evt)
	tmu_timer_set_interval(TMU0,cycles, evt->mode == CLOCK_EVT_MODE_PERIODIC);
	_tmu_set_irq(TMU0,1);
/*
 * clockevents set_mode hook for TMU0.  Periodic mode keeps TCOR at the
 * last programmed interval so TCNT auto-reloads on underflow; oneshot
 * clears TCOR so the counter does not reload.  The remaining modes need
 * no register work here.
 * NOTE(review): the switch framing ("switch (mode) {", braces, break
 * statements) did not survive extraction — only the case labels and
 * register writes are visible; recover the framing from the full file.
 */
static void tmu_set_mode(enum clock_event_mode mode,
			 struct clock_event_device *evt)
	case CLOCK_EVT_MODE_PERIODIC:
		ctrl_outl(tmu_latest_interval[TMU0], TMU0_TCOR);
	case CLOCK_EVT_MODE_ONESHOT:
		ctrl_outl(0, TMU0_TCOR);
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
	case CLOCK_EVT_MODE_RESUME:
/*
 * Clockevent device backed by TMU0; supports both periodic and oneshot
 * operation.
 * NOTE(review): the closing "};" and probably further initializers
 * (.name/.shift — .shift is referenced elsewhere in this file) are
 * missing from this extraction.
 */
static struct clock_event_device tmu0_clockevent = {
	.features	= CLOCK_EVT_FEAT_PERIODIC |
			  CLOCK_EVT_FEAT_ONESHOT,
	.set_mode	= tmu_set_mode,
	.set_next_event	= tmu_set_next_event,
/*
 * TMU0 underflow interrupt handler: acknowledge the pending status,
 * keep the interrupt enabled except in oneshot mode (a oneshot event is
 * re-armed via tmu_set_next_event()), and invoke the clockevent handler
 * when in oneshot or periodic mode.
 * NOTE(review): braces, break statements, the default label and the
 * return statement did not survive extraction — recover them from the
 * full file.
 */
static irqreturn_t tmu_timer_interrupt(int irq, void *dummy)
	struct clock_event_device *evt = &tmu0_clockevent;
	_tmu_clear_status(TMU0);
	_tmu_set_irq(TMU0,tmu0_clockevent.mode != CLOCK_EVT_MODE_ONESHOT);

	switch (tmu0_clockevent.mode) {
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_PERIODIC:
		evt->event_handler(evt);
/*
 * IRQ action for the TMU0 tick interrupt, installed via setup_irq() in
 * tmu_timer_init().
 * NOTE(review): the closing "};" is missing from this extraction.
 */
static struct irqaction tmu0_irq = {
	.name		= "periodic/oneshot timer",
	.handler	= tmu_timer_interrupt,
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
167 static void __init
tmu_clk_init(struct clk
*clk
)
169 u8 divisor
= TMU_TCR_INIT
& 0x7;
170 int tmu_num
= clk
->name
[3]-'0';
171 ctrl_outw(TMU_TCR_INIT
, TMU0_TCR
+(tmu_num
*0xC));
172 clk
->rate
= clk_get_rate(clk
->parent
) / (4 << (divisor
<< 1));
/*
 * clk_ops recalc hook: re-read the channel's TCR divisor, recompute the
 * clock rate, and — when the rate actually changed — rescale the TMU0
 * clockevent (mult/max_delta_ns/min_delta_ns), adjust the remembered
 * interval, and reprogram the channel, all with interrupts disabled.
 * NOTE(review): several lines did not survive extraction: braces, the
 * "flags" local declaration used by local_irq_save(), the early-return
 * conditions (the visible "No more work on TMU1" return presumably
 * belongs to a lost "if (tmu_num)" check), and the if/else that selects
 * between the >>= 1 and <<= 1 interval adjustments.  Recover them from
 * the full file before relying on this text.
 */
static void tmu_clk_recalc(struct clk *clk)
	int tmu_num = clk->name[3]-'0';
	unsigned long prev_rate = clk_get_rate(clk);
	u8 divisor = ctrl_inw(TMU0_TCR+tmu_num*0xC) & 0x7;
	clk->rate  = clk_get_rate(clk->parent) / (4 << (divisor << 1));

	/* Nothing to do when the rate is unchanged. */
	if(prev_rate==clk_get_rate(clk))

	return; /* No more work on TMU1 */

	local_irq_save(flags);
	/* Remember whether the clock was scaled down (see tmu_timer_read()). */
	tmus_are_scaled = (prev_rate > clk->rate);

	tmu0_clockevent.mult = div_sc(clk->rate, NSEC_PER_SEC,
		tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
		clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
		clockevent_delta2ns(1, &tmu0_clockevent);

	/* NOTE(review): originally one of these two, chosen by
	 * tmus_are_scaled — the conditional is lost. */
	tmu_latest_interval[TMU0] >>= 1;
	tmu_latest_interval[TMU0] <<= 1;

	tmu_timer_set_interval(TMU0,
		tmu_latest_interval[TMU0],
		tmu0_clockevent.mode == CLOCK_EVT_MODE_PERIODIC);

	local_irq_restore(flags);
/*
 * Clock operations shared by the per-channel TMU clocks.
 * NOTE(review): the closing "};" is missing from this extraction.
 */
static struct clk_ops tmu_clk_ops = {
	.init		= tmu_clk_init,
	.recalc		= tmu_clk_recalc,
/*
 * Per-channel TMU clocks; their parents are set to "module_clk" in
 * tmu_timer_init().
 * NOTE(review): all initializers (presumably .name and .ops) and the
 * closing braces are missing from this extraction — recover them from
 * the full file.
 */
static struct clk tmu0_clk = {

static struct clk tmu1_clk = {
/*
 * sys_timer init hook: install the timer IRQ, parent both TMU clocks to
 * module_clk, register and enable them, then program TMU0 as a HZ-rate
 * clockevent device and TMU1 (free-running, interval ~0) as a 32-bit
 * clocksource.
 * NOTE(review): several lines did not survive extraction (braces, the
 * "#endif" matching the #if below, possibly a TMU1 start call, and the
 * return statement) — recover them from the full file.
 */
static int tmu_timer_init(void)
	unsigned long interval;
	unsigned long frequency;

	setup_irq(CONFIG_SH_TIMER_IRQ, &tmu0_irq);

	tmu0_clk.parent = clk_get(NULL, "module_clk");
	tmu1_clk.parent = clk_get(NULL, "module_clk");

	/* NOTE(review): presumably these subtypes lack the TOCR register,
	 * hence the conditional write — confirm against the CPU manuals. */
#if !defined(CONFIG_CPU_SUBTYPE_SH7720) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7721) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7760) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7785) && \
	!defined(CONFIG_CPU_SUBTYPE_SH7786) && \
	!defined(CONFIG_CPU_SUBTYPE_SHX3)
	ctrl_outb(TMU_TOCR_INIT, TMU_TOCR);

	clk_register(&tmu0_clk);
	clk_register(&tmu1_clk);
	clk_enable(&tmu0_clk);
	clk_enable(&tmu1_clk);

	frequency = clk_get_rate(&tmu0_clk);
	/* Tick interval rounded to the nearest counter value. */
	interval = (frequency + HZ / 2) / HZ;

	tmu_timer_set_interval(TMU0,interval, 1);
	tmu_timer_set_interval(TMU1,~0,1);

	/* TMU1 backs the clocksource. */
	clocksource_sh.rating = 200;
	clocksource_sh.mask = CLOCKSOURCE_MASK(32);
	clocksource_sh.read = tmu_timer_read;
	clocksource_sh.shift = 10;
	clocksource_sh.mult = clocksource_hz2mult(clk_get_rate(&tmu1_clk),
		clocksource_sh.shift);
	clocksource_sh.flags = CLOCK_SOURCE_IS_CONTINUOUS;
	clocksource_register(&clocksource_sh);

	/* TMU0 backs the clockevent device. */
	tmu0_clockevent.mult = div_sc(frequency, NSEC_PER_SEC,
		tmu0_clockevent.shift);
	tmu0_clockevent.max_delta_ns =
		clockevent_delta2ns(-1, &tmu0_clockevent);
	tmu0_clockevent.min_delta_ns =
		clockevent_delta2ns(1, &tmu0_clockevent);

	tmu0_clockevent.cpumask = cpumask_of(0);
	tmu0_clockevent.rating = 100;

	clockevents_register_device(&tmu0_clockevent);
/*
 * sys_timer hooks for the TMU.
 * NOTE(review): the closing "};" is missing from this extraction.
 */
static struct sys_timer_ops tmu_timer_ops = {
	.init		= tmu_timer_init,
	.start		= tmu_timer_start,
	.stop		= tmu_timer_stop,
/*
 * The exported sys_timer instance (non-static: referenced by the SH
 * timer registration code elsewhere).
 * NOTE(review): other initializers (presumably .name) and the closing
 * "};" are missing from this extraction.
 */
struct sys_timer tmu_timer = {
	.ops		= &tmu_timer_ops,