/*
 *  linux/arch/arm/plat-mxc/time.c
 *
 *  Copyright (C) 2000-2001 Deep Blue Solutions
 *  Copyright (C) 2002 Shane Nay (shane@minirl.com)
 *  Copyright (C) 2006-2007 Pavel Pisa (ppisa@pikron.com)
 *  Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de)
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version 2
 *  of the License, or (at your option) any later version.
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
 *  MA  02110-1301, USA.
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/clockchips.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/sched_clock.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <soc/imx/timer.h>

/*
 * There are 4 versions of the timer hardware on Freescale MXC hardware.
 *  - MX1/MXL
 *  - MX21, MX27
 *  - MX25, MX31, MX35, MX37, MX51, MX6Q(rev1.0)
 *  - MX6DL, MX6SX, MX6Q(rev1.1+)
 */
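
/*
 * Each of the four variants above is handled through one of the
 * imx_gpt_data descriptors defined later in this file (imx1_gpt_data,
 * imx21_gpt_data, imx31_gpt_data and imx6dl_gpt_data), selected in
 * _mxc_timer_init() based on the GPT type.
 */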

/* defines common for all i.MX */
#define MXC_TCTL		0x00
#define MXC_TCTL_TEN		(1 << 0) /* Enable module */
#define MXC_TPRER		0x04

/* MX1, MX21, MX27 */
#define MX1_2_TCTL_CLK_PCLK1	(1 << 1)
#define MX1_2_TCTL_IRQEN	(1 << 4)
#define MX1_2_TCTL_FRR		(1 << 8)
#define MX1_2_TCMP		0x08
#define MX1_2_TCN		0x10
#define MX1_2_TSTAT		0x14

/* MX21, MX27 */
#define MX2_TSTAT_CAPT		(1 << 1)
#define MX2_TSTAT_COMP		(1 << 0)

/* MX31, MX35, MX25, MX5, MX6 */
#define V2_TCTL_WAITEN		(1 << 3) /* Wait enable mode */
#define V2_TCTL_CLK_IPG		(1 << 6)
#define V2_TCTL_CLK_PER		(2 << 6)
#define V2_TCTL_CLK_OSC_DIV8	(5 << 6)
#define V2_TCTL_FRR		(1 << 9)
#define V2_TCTL_24MEN		(1 << 10)
#define V2_TPRER_PRE24M		12
#define V2_IR			0x0c
#define V2_TSTAT		0x08
#define V2_TSTAT_OF1		(1 << 0)
#define V2_TCN			0x24
#define V2_TCMP			0x10

#define V2_TIMER_RATE_OSC_DIV8	3000000
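/*
 * 3 MHz is what the 24 MHz oscillator becomes after the fixed /8 divider
 * (24000000 / 8 = 3000000); the setup code compares the per clock rate
 * against this value to detect that the OSC_DIV8 input should be selected
 * instead of the peripheral clock.
 */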

struct imx_timer {
	enum imx_gpt_type type;
	void __iomem *base;
	int irq;
	struct clk *clk_per;
	struct clk *clk_ipg;
	const struct imx_gpt_data *gpt;
	struct clock_event_device ced;
	struct irqaction act;
};

struct imx_gpt_data {
	int reg_tstat;
	int reg_tcn;
	int reg_tcmp;
	void (*gpt_setup_tctl)(struct imx_timer *imxtm);
	void (*gpt_irq_enable)(struct imx_timer *imxtm);
	void (*gpt_irq_disable)(struct imx_timer *imxtm);
	void (*gpt_irq_acknowledge)(struct imx_timer *imxtm);
	int (*set_next_event)(unsigned long evt,
			      struct clock_event_device *ced);
};

static inline struct imx_timer *to_imx_timer(struct clock_event_device *ced)
{
	return container_of(ced, struct imx_timer, ced);
}

static void imx1_gpt_irq_disable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp & ~MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_disable imx1_gpt_irq_disable

static void imx31_gpt_irq_disable(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_disable imx31_gpt_irq_disable

static void imx1_gpt_irq_enable(struct imx_timer *imxtm)
{
	unsigned int tmp;

	tmp = readl_relaxed(imxtm->base + MXC_TCTL);
	writel_relaxed(tmp | MX1_2_TCTL_IRQEN, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_irq_enable imx1_gpt_irq_enable

static void imx31_gpt_irq_enable(struct imx_timer *imxtm)
{
	writel_relaxed(1 << 0, imxtm->base + V2_IR);
}
#define imx6dl_gpt_irq_enable imx31_gpt_irq_enable

static void imx1_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(0, imxtm->base + MX1_2_TSTAT);
}

static void imx21_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(MX2_TSTAT_CAPT | MX2_TSTAT_COMP,
				imxtm->base + MX1_2_TSTAT);
}

static void imx31_gpt_irq_acknowledge(struct imx_timer *imxtm)
{
	writel_relaxed(V2_TSTAT_OF1, imxtm->base + V2_TSTAT);
}
#define imx6dl_gpt_irq_acknowledge imx31_gpt_irq_acknowledge
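
/*
 * The acknowledge helpers above differ per timer version: imx1 writes 0
 * to its status register, imx21 writes back the capture and compare
 * status bits, and the V2 (imx31/imx6dl) variant writes V2_TSTAT_OF1 to
 * clear the output-compare-1 status that triggers the timer interrupt.
 */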

static void __iomem *sched_clock_reg;

static u64 notrace mxc_read_sched_clock(void)
{
	return sched_clock_reg ? readl_relaxed(sched_clock_reg) : 0;
}

static struct delay_timer imx_delay_timer;

static unsigned long imx_read_current_timer(void)
{
	return readl_relaxed(sched_clock_reg);
}

static int __init mxc_clocksource_init(struct imx_timer *imxtm)
{
	unsigned int c = clk_get_rate(imxtm->clk_per);
	void __iomem *reg = imxtm->base + imxtm->gpt->reg_tcn;

	imx_delay_timer.read_current_timer = &imx_read_current_timer;
	imx_delay_timer.freq = c;
	register_current_timer_delay(&imx_delay_timer);

	sched_clock_reg = reg;

	sched_clock_register(mxc_read_sched_clock, 32, c);
	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
			clocksource_mmio_readl_up);
}

static int mx1_2_set_next_event(unsigned long evt,
			      struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + MX1_2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + MX1_2_TCMP);

	return (int)(tcmp - readl_relaxed(imxtm->base + MX1_2_TCN)) < 0 ?
				-ETIME : 0;
}
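
/*
 * The ternary above guards against the requested expiry already being in
 * the past: the compare value is programmed relative to the counter, the
 * counter is read back, and if it has already run past the new compare
 * point the function returns -ETIME so the clockevents core can retry
 * with a larger delta.
 */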

static int v2_set_next_event(unsigned long evt,
			     struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long tcmp;

	tcmp = readl_relaxed(imxtm->base + V2_TCN) + evt;

	writel_relaxed(tcmp, imxtm->base + V2_TCMP);

	return evt < 0x7fffffff &&
		(int)(tcmp - readl_relaxed(imxtm->base + V2_TCN)) < 0 ?
				-ETIME : 0;
}
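
/*
 * Same late-expiry check as mx1_2_set_next_event(), with one extra
 * condition: for deltas of 0x7fffffff ticks or more the signed 32-bit
 * comparison can no longer distinguish "already passed" from "far in the
 * future", so such large programmings are simply reported as successful.
 */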

static int mxc_shutdown(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long flags;
	u32 tcn;

	/*
	 * The timer interrupt generation is disabled at least
	 * for enough time to call mxc_set_next_event()
	 */
	local_irq_save(flags);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
	/* Set event time into far-far future */
	writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

	/* Clear pending interrupt */
	imxtm->gpt->gpt_irq_acknowledge(imxtm);

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	local_irq_restore(flags);

	return 0;
}
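
/*
 * "Far-far future" above means: with a free-running 32-bit up-counter,
 * programming the compare register to the current count minus 3 puts the
 * next match almost a full counter wrap away, which effectively parks the
 * timer until a new event is programmed.
 */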

static int mxc_set_oneshot(struct clock_event_device *ced)
{
	struct imx_timer *imxtm = to_imx_timer(ced);
	unsigned long flags;

	/*
	 * The timer interrupt generation is disabled at least
	 * for enough time to call mxc_set_next_event()
	 */
	local_irq_save(flags);

	/* Disable interrupt in GPT module */
	imxtm->gpt->gpt_irq_disable(imxtm);

	if (!clockevent_state_oneshot(ced)) {
		u32 tcn = readl_relaxed(imxtm->base + imxtm->gpt->reg_tcn);
		/* Set event time into far-far future */
		writel_relaxed(tcn - 3, imxtm->base + imxtm->gpt->reg_tcmp);

		/* Clear pending interrupt */
		imxtm->gpt->gpt_irq_acknowledge(imxtm);
	}

#ifdef DEBUG
	printk(KERN_INFO "%s: changing mode\n", __func__);
#endif /* DEBUG */

	/*
	 * Do not put overhead of interrupt enable/disable into
	 * mxc_set_next_event(), the core has about 4 minutes
	 * to call mxc_set_next_event() or shutdown clock after
	 * mode switching
	 */
	imxtm->gpt->gpt_irq_enable(imxtm);
	local_irq_restore(flags);

	return 0;
}
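
/*
 * The "about 4 minutes" above follows from the 32-bit counter width:
 * at a GPT clock in the tens of MHz a full 2^32-tick wrap takes on the
 * order of a minute to a few minutes, so the interrupt can safely stay
 * enabled between the mode switch and the first set_next_event() call.
 */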

/*
 * IRQ handler for the timer
 */
static irqreturn_t mxc_timer_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *ced = dev_id;
	struct imx_timer *imxtm = to_imx_timer(ced);
	u32 tstat;

	tstat = readl_relaxed(imxtm->base + imxtm->gpt->reg_tstat);

	imxtm->gpt->gpt_irq_acknowledge(imxtm);

	ced->event_handler(ced);

	return IRQ_HANDLED;
}

static int __init mxc_clockevent_init(struct imx_timer *imxtm)
{
	struct clock_event_device *ced = &imxtm->ced;
	struct irqaction *act = &imxtm->act;

	ced->name = "mxc_timer1";
	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
	ced->set_state_shutdown = mxc_shutdown;
	ced->set_state_oneshot = mxc_set_oneshot;
	ced->tick_resume = mxc_shutdown;
	ced->set_next_event = imxtm->gpt->set_next_event;
	ced->rating = 200;
	ced->cpumask = cpumask_of(0);
	ced->irq = imxtm->irq;
	clockevents_config_and_register(ced, clk_get_rate(imxtm->clk_per),
					0xff, 0xfffffffe);

	act->name = "i.MX Timer Tick";
	act->flags = IRQF_TIMER | IRQF_IRQPOLL;
	act->handler = mxc_timer_interrupt;
	act->dev_id = ced;

	return setup_irq(imxtm->irq, act);
}
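
/*
 * The 0xff/0xfffffffe arguments to clockevents_config_and_register() are
 * the minimum and maximum programmable delta in timer ticks; together
 * with the per clock rate they let the clockevents core convert
 * nanosecond requests into counter deltas for set_next_event().
 */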

static void imx1_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
#define imx21_gpt_setup_tctl imx1_gpt_setup_tctl

static void imx31_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8)
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
	else
		tctl_val |= V2_TCTL_CLK_PER;

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}

static void imx6dl_gpt_setup_tctl(struct imx_timer *imxtm)
{
	u32 tctl_val;

	tctl_val = V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
	if (clk_get_rate(imxtm->clk_per) == V2_TIMER_RATE_OSC_DIV8) {
		tctl_val |= V2_TCTL_CLK_OSC_DIV8;
		/* 24 / 8 = 3 MHz */
		writel_relaxed(7 << V2_TPRER_PRE24M, imxtm->base + MXC_TPRER);
		tctl_val |= V2_TCTL_24MEN;
	} else {
		tctl_val |= V2_TCTL_CLK_PER;
	}

	writel_relaxed(tctl_val, imxtm->base + MXC_TCTL);
}
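
/*
 * On the i.MX6DL path the prescaler write above configures the 24 MHz
 * crystal input: the value 7 in the PRE24M field of MXC_TPRER selects a
 * divide-by-8, which is how the 3 MHz V2_TIMER_RATE_OSC_DIV8 rate is
 * produced when V2_TCTL_24MEN enables the 24 MHz source.
 */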

static const struct imx_gpt_data imx1_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx1_gpt_irq_enable,
	.gpt_irq_disable = imx1_gpt_irq_disable,
	.gpt_irq_acknowledge = imx1_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx1_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx21_gpt_data = {
	.reg_tstat = MX1_2_TSTAT,
	.reg_tcn = MX1_2_TCN,
	.reg_tcmp = MX1_2_TCMP,
	.gpt_irq_enable = imx21_gpt_irq_enable,
	.gpt_irq_disable = imx21_gpt_irq_disable,
	.gpt_irq_acknowledge = imx21_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx21_gpt_setup_tctl,
	.set_next_event = mx1_2_set_next_event,
};

static const struct imx_gpt_data imx31_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx31_gpt_irq_enable,
	.gpt_irq_disable = imx31_gpt_irq_disable,
	.gpt_irq_acknowledge = imx31_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx31_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};

static const struct imx_gpt_data imx6dl_gpt_data = {
	.reg_tstat = V2_TSTAT,
	.reg_tcn = V2_TCN,
	.reg_tcmp = V2_TCMP,
	.gpt_irq_enable = imx6dl_gpt_irq_enable,
	.gpt_irq_disable = imx6dl_gpt_irq_disable,
	.gpt_irq_acknowledge = imx6dl_gpt_irq_acknowledge,
	.gpt_setup_tctl = imx6dl_gpt_setup_tctl,
	.set_next_event = v2_set_next_event,
};
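
/*
 * Note that imx21_gpt_data is identical to imx1_gpt_data apart from its
 * acknowledge hook, and imx6dl_gpt_data differs from imx31_gpt_data only
 * in its TCTL setup (24 MHz oscillator support); the per-version function
 * pointers keep those differences in one place.
 */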

static int __init _mxc_timer_init(struct imx_timer *imxtm)
{
	int ret;

	switch (imxtm->type) {
	case GPT_TYPE_IMX1:
		imxtm->gpt = &imx1_gpt_data;
		break;
	case GPT_TYPE_IMX21:
		imxtm->gpt = &imx21_gpt_data;
		break;
	case GPT_TYPE_IMX31:
		imxtm->gpt = &imx31_gpt_data;
		break;
	case GPT_TYPE_IMX6DL:
		imxtm->gpt = &imx6dl_gpt_data;
		break;
	default:
		return -EINVAL;
	}

	if (IS_ERR(imxtm->clk_per)) {
		pr_err("i.MX timer: unable to get clk\n");
		return PTR_ERR(imxtm->clk_per);
	}

	if (!IS_ERR(imxtm->clk_ipg))
		clk_prepare_enable(imxtm->clk_ipg);

	clk_prepare_enable(imxtm->clk_per);

	/*
	 * Initialise to a known state (all timers off, and timing reset)
	 */

	writel_relaxed(0, imxtm->base + MXC_TCTL);
	writel_relaxed(0, imxtm->base + MXC_TPRER); /* see datasheet note */

	imxtm->gpt->gpt_setup_tctl(imxtm);

	/* init and register the timer to the framework */
	ret = mxc_clocksource_init(imxtm);
	if (ret)
		return ret;

	return mxc_clockevent_init(imxtm);
}

void __init mxc_timer_init(unsigned long pbase, int irq, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	BUG_ON(!imxtm);

	imxtm->clk_per = clk_get_sys("imx-gpt.0", "per");
	imxtm->clk_ipg = clk_get_sys("imx-gpt.0", "ipg");

	imxtm->base = ioremap(pbase, SZ_4K);
	BUG_ON(!imxtm->base);

	imxtm->type = type;
	imxtm->irq = irq;

	_mxc_timer_init(imxtm);
}

static int __init mxc_timer_init_dt(struct device_node *np, enum imx_gpt_type type)
{
	struct imx_timer *imxtm;
	static int initialized;
	int ret;

	/* Support one instance only */
	if (initialized)
		return 0;

	imxtm = kzalloc(sizeof(*imxtm), GFP_KERNEL);
	if (!imxtm)
		return -ENOMEM;

	imxtm->base = of_iomap(np, 0);
	if (!imxtm->base)
		return -ENXIO;

	imxtm->irq = irq_of_parse_and_map(np, 0);
	if (imxtm->irq <= 0)
		return -EINVAL;

	imxtm->clk_ipg = of_clk_get_by_name(np, "ipg");

	/* Try osc_per first, and fall back to per otherwise */
	imxtm->clk_per = of_clk_get_by_name(np, "osc_per");
	if (IS_ERR(imxtm->clk_per))
		imxtm->clk_per = of_clk_get_by_name(np, "per");

	imxtm->type = type;

	ret = _mxc_timer_init(imxtm);
	if (ret)
		return ret;

	initialized = 1;

	return 0;
}

static int __init imx1_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX1);
}

static int __init imx21_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX21);
}

static int __init imx31_timer_init_dt(struct device_node *np)
{
	enum imx_gpt_type type = GPT_TYPE_IMX31;

	/*
	 * We were using the same compatible string for the i.MX6Q/D and
	 * i.MX6DL/S GPT devices, while they actually have different
	 * programming models.  This is a workaround to keep the existing
	 * i.MX6DL/S DTBs working with the new kernel.
	 */
	if (of_machine_is_compatible("fsl,imx6dl"))
		type = GPT_TYPE_IMX6DL;

	return mxc_timer_init_dt(np, type);
}

static int __init imx6dl_timer_init_dt(struct device_node *np)
{
	return mxc_timer_init_dt(np, GPT_TYPE_IMX6DL);
}

CLOCKSOURCE_OF_DECLARE(imx1_timer, "fsl,imx1-gpt", imx1_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx21_timer, "fsl,imx21-gpt", imx21_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx27_timer, "fsl,imx27-gpt", imx21_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx31_timer, "fsl,imx31-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx25_timer, "fsl,imx25-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx50_timer, "fsl,imx50-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx51_timer, "fsl,imx51-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx53_timer, "fsl,imx53-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx6q_timer, "fsl,imx6q-gpt", imx31_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx6dl_timer, "fsl,imx6dl-gpt", imx6dl_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx6sl_timer, "fsl,imx6sl-gpt", imx6dl_timer_init_dt);
CLOCKSOURCE_OF_DECLARE(imx6sx_timer, "fsl,imx6sx-gpt", imx6dl_timer_init_dt);