/*
 *  linux/arch/arm/kernel/arch_timer.c
 *
 *  Copyright (C) 2011 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/jiffies.h>
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/of_irq.h>

#include <asm/cputype.h>
#include <asm/delay.h>
#include <asm/localtimer.h>
#include <asm/arch_timer.h>
#include <asm/system_info.h>
#include <asm/sched_clock.h>

static unsigned long arch_timer_rate;

/* PPI slots, in the order the device tree lists the timer interrupts. */
enum ppi_nr {
	PHYS_SECURE_PPI,
	PHYS_NONSECURE_PPI,
	VIRT_PPI,
	HYP_PPI,
	MAX_TIMER_PPI
};

static int arch_timer_ppi[MAX_TIMER_PPI];

static struct clock_event_device __percpu **arch_timer_evt;
static struct delay_timer arch_delay_timer;

static bool arch_timer_use_virtual = true;
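
/*
 * Default to the virtual timer: the virtual counter is always
 * accessible to non-secure PL1, while CNTHCTL can restrict access to
 * the physical one. arch_timer_of_register() falls back to the
 * physical timer when the DT provides no virtual-timer PPI.
 */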

/*
 * Architected system timer support.
 */

#define ARCH_TIMER_CTRL_ENABLE		(1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK		(1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT		(1 << 2)

#define ARCH_TIMER_REG_CTRL		0
#define ARCH_TIMER_REG_FREQ		1
#define ARCH_TIMER_REG_TVAL		2

#define ARCH_TIMER_PHYS_ACCESS		0
#define ARCH_TIMER_VIRT_ACCESS		1

/*
 * These register accessors are marked inline so the compiler can
 * nicely work out which register we want, and chuck away the rest of
 * the code. At least it does so with a recent GCC (4.6.3).
 */
static inline void arch_timer_reg_write(const int access, const int reg, u32 val)
{
	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c2, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c2, 0" : : "r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mcr p15, 0, %0, c14, c3, 1" : : "r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mcr p15, 0, %0, c14, c3, 0" : : "r" (val));
			break;
		}
	}

	isb();
}
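
/*
 * CP15 encodings used above: c14, c2, 1 is CNTP_CTL and c14, c2, 0 is
 * CNTP_TVAL (physical timer); c14, c3, 1 is CNTV_CTL and c14, c3, 0
 * is CNTV_TVAL (virtual timer). The trailing isb() ensures the write
 * has taken effect before the timer is relied upon.
 */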

static inline u32 arch_timer_reg_read(const int access, const int reg)
{
	u32 val = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c2, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c2, 0" : "=r" (val));
			break;
		case ARCH_TIMER_REG_FREQ:
			asm volatile("mrc p15, 0, %0, c14, c0, 0" : "=r" (val));
			break;
		}
	}

	if (access == ARCH_TIMER_VIRT_ACCESS) {
		switch (reg) {
		case ARCH_TIMER_REG_CTRL:
			asm volatile("mrc p15, 0, %0, c14, c3, 1" : "=r" (val));
			break;
		case ARCH_TIMER_REG_TVAL:
			asm volatile("mrc p15, 0, %0, c14, c3, 0" : "=r" (val));
			break;
		}
	}

	return val;
}

static inline cycle_t arch_timer_counter_read(const int access)
{
	cycle_t cval = 0;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		asm volatile("mrrc p15, 0, %Q0, %R0, c14" : "=r" (cval));

	if (access == ARCH_TIMER_VIRT_ACCESS)
		asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (cval));

	return cval;
}
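
/*
 * mrrc transfers a 64-bit value into a register pair; the %Q0 and %R0
 * operand modifiers select the low and high words of cval. Opcode 0
 * reads CNTPCT (physical count), opcode 1 reads CNTVCT (virtual count).
 */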

static inline cycle_t arch_counter_get_cntpct(void)
{
	return arch_timer_counter_read(ARCH_TIMER_PHYS_ACCESS);
}

static inline cycle_t arch_counter_get_cntvct(void)
{
	return arch_timer_counter_read(ARCH_TIMER_VIRT_ACCESS);
}

static inline irqreturn_t timer_handler(const int access,
					struct clock_event_device *evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		evt->event_handler(evt);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}
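
/*
 * The timer output is level-sensitive: once it fires, the interrupt
 * stays asserted until the condition goes away. Setting IT_MASK in
 * the control register silences it until the next event is
 * programmed; set_next_event() clears the mask again.
 */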

static irqreturn_t arch_timer_handler_virt(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_VIRT_ACCESS, evt);
}

static irqreturn_t arch_timer_handler_phys(int irq, void *dev_id)
{
	struct clock_event_device *evt = *(struct clock_event_device **)dev_id;

	return timer_handler(ARCH_TIMER_PHYS_ACCESS, evt);
}

static inline void timer_set_mode(const int access, int mode)
{
	unsigned long ctrl;

	switch (mode) {
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		/* Disable the timer; other modes need no action here. */
		ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
		ctrl &= ~ARCH_TIMER_CTRL_ENABLE;
		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
		break;
	default:
		break;
	}
}

static void arch_timer_set_mode_virt(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_VIRT_ACCESS, mode);
}

static void arch_timer_set_mode_phys(enum clock_event_mode mode,
				     struct clock_event_device *clk)
{
	timer_set_mode(ARCH_TIMER_PHYS_ACCESS, mode);
}

static inline void set_next_event(const int access, unsigned long evt)
{
	unsigned long ctrl;

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;
	arch_timer_reg_write(access, ARCH_TIMER_REG_TVAL, evt);
	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl);
}
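
/*
 * TVAL is a signed 32-bit down-counter: writing N arms the timer to
 * fire N ticks from now, so callers pass a delta rather than an
 * absolute deadline.
 */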

static int arch_timer_set_next_event_virt(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_VIRT_ACCESS, evt);
	return 0;
}

static int arch_timer_set_next_event_phys(unsigned long evt,
					  struct clock_event_device *unused)
{
	set_next_event(ARCH_TIMER_PHYS_ACCESS, evt);
	return 0;
}

static int __cpuinit arch_timer_setup(struct clock_event_device *clk)
{
	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP;
	clk->name = "arch_sys_timer";
	clk->rating = 450;

	if (arch_timer_use_virtual) {
		clk->irq = arch_timer_ppi[VIRT_PPI];
		clk->set_mode = arch_timer_set_mode_virt;
		clk->set_next_event = arch_timer_set_next_event_virt;
	} else {
		clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
		clk->set_mode = arch_timer_set_mode_phys;
		clk->set_next_event = arch_timer_set_next_event_phys;
	}

	clk->set_mode(CLOCK_EVT_MODE_SHUTDOWN, NULL);

	clockevents_config_and_register(clk, arch_timer_rate,
					0xf, 0x7fffffff);

	*__this_cpu_ptr(arch_timer_evt) = clk;

	if (arch_timer_use_virtual) {
		enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
	} else {
		enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
	}

	return 0;
}

/* Is the optional system timer available? */
static int local_timer_is_architected(void)
{
	return (cpu_architecture() >= CPU_ARCH_ARMv7) &&
	       ((read_cpuid_ext(CPUID_EXT_PFR1) >> 16) & 0xf) == 1;
}
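
/*
 * ID_PFR1 bits [19:16] describe the generic timer extension; the
 * value 1 means an architected timer is implemented.
 */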

static int arch_timer_available(void)
{
	unsigned long freq;

	if (!local_timer_is_architected())
		return -ENXIO;

	if (arch_timer_rate == 0) {
		freq = arch_timer_reg_read(ARCH_TIMER_PHYS_ACCESS,
					   ARCH_TIMER_REG_FREQ);

		/* Check the timer frequency. */
		if (freq == 0) {
			pr_warn("Architected timer frequency not available\n");
			return -EINVAL;
		}

		arch_timer_rate = freq;
	}

	pr_info_once("Architected local timer running at %lu.%02luMHz (%s).\n",
		     arch_timer_rate / 1000000, (arch_timer_rate / 10000) % 100,
		     arch_timer_use_virtual ? "virt" : "phys");
	return 0;
}

static u32 notrace arch_counter_get_cntpct32(void)
{
	cycle_t cnt = arch_counter_get_cntpct();

	/*
	 * The sched_clock infrastructure only knows about counters
	 * with at most 32bits. Forget about the upper 24 bits for the
	 * time being...
	 */
	return (u32)cnt;
}

static u32 notrace arch_counter_get_cntvct32(void)
{
	cycle_t cnt = arch_counter_get_cntvct();

	/*
	 * The sched_clock infrastructure only knows about counters
	 * with at most 32bits. Forget about the upper 24 bits for the
	 * time being...
	 */
	return (u32)cnt;
}
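
/*
 * The generic sched_clock layer extends the truncated value back to
 * 64 bits, provided the counter is sampled at least once per wrap
 * period. A 32-bit window wraps every 2^32 / rate seconds; at, say,
 * 24 MHz that is roughly every 179 seconds.
 */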

static cycle_t arch_counter_read(struct clocksource *cs)
{
	/*
	 * Always use the physical counter for the clocksource.
	 * CNTHCTL.PL1PCTEN must be set to 1.
	 */
	return arch_counter_get_cntpct();
}

static unsigned long arch_timer_read_current_timer(void)
{
	return arch_counter_get_cntpct();
}

static cycle_t arch_counter_read_cc(const struct cyclecounter *cc)
{
	/*
	 * Always use the physical counter for the clocksource.
	 * CNTHCTL.PL1PCTEN must be set to 1.
	 */
	return arch_counter_get_cntpct();
}

static struct clocksource clocksource_counter = {
	.name	= "arch_sys_counter",
	.rating	= 400,
	.read	= arch_counter_read,
	.mask	= CLOCKSOURCE_MASK(56),
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};

static struct cyclecounter cyclecounter = {
	.read	= arch_counter_read_cc,
	.mask	= CLOCKSOURCE_MASK(56),
};

static struct timecounter timecounter;

struct timecounter *arch_timer_get_timecounter(void)
{
	return &timecounter;
}

static void __cpuinit arch_timer_stop(struct clock_event_device *clk)
{
	pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
		 clk->irq, smp_processor_id());

	if (arch_timer_use_virtual) {
		disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
	} else {
		disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
	}

	clk->set_mode(CLOCK_EVT_MODE_UNUSED, clk);
}

static struct local_timer_ops arch_timer_ops __cpuinitdata = {
	.setup	= arch_timer_setup,
	.stop	= arch_timer_stop,
};

static struct clock_event_device arch_timer_global_evt;

static int __init arch_timer_register(void)
{
	int err;
	int ppi;

	err = arch_timer_available();
	if (err)
		goto out;

	arch_timer_evt = alloc_percpu(struct clock_event_device *);
	if (!arch_timer_evt) {
		err = -ENOMEM;
		goto out;
	}

	clocksource_register_hz(&clocksource_counter, arch_timer_rate);
	cyclecounter.mult = clocksource_counter.mult;
	cyclecounter.shift = clocksource_counter.shift;
	timecounter_init(&timecounter, &cyclecounter,
			 arch_counter_get_cntpct());
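
	/*
	 * The cyclecounter reuses the clocksource's mult/shift pair,
	 * so both convert counter ticks to nanoseconds with the same
	 * scaling.
	 */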

	if (arch_timer_use_virtual) {
		ppi = arch_timer_ppi[VIRT_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_virt,
					 "arch_timer", arch_timer_evt);
	} else {
		ppi = arch_timer_ppi[PHYS_SECURE_PPI];
		err = request_percpu_irq(ppi, arch_timer_handler_phys,
					 "arch_timer", arch_timer_evt);
		if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			ppi = arch_timer_ppi[PHYS_NONSECURE_PPI];
			err = request_percpu_irq(ppi, arch_timer_handler_phys,
						 "arch_timer", arch_timer_evt);
			if (err)
				free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
						arch_timer_evt);
		}
	}

	if (err) {
		pr_err("arch_timer: can't register interrupt %d (%d)\n",
		       ppi, err);
		goto out_free;
	}

	err = local_timer_register(&arch_timer_ops);
	if (err) {
		/*
		 * We couldn't register as a local timer (could be
		 * because we're on a UP platform, or because some
		 * other local timer is already present...). Try as a
		 * global timer instead.
		 */
		arch_timer_global_evt.cpumask = cpumask_of(0);
		err = arch_timer_setup(&arch_timer_global_evt);
	}

	if (err)
		goto out_free_irq;

	/* Use the architected timer for the delay loop. */
	arch_delay_timer.read_current_timer = &arch_timer_read_current_timer;
	arch_delay_timer.freq = arch_timer_rate;
	register_current_timer_delay(&arch_delay_timer);
	return 0;
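
	/*
	 * Error paths: release whatever was acquired, in reverse
	 * order of setup.
	 */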
out_free_irq:
	if (arch_timer_use_virtual) {
		free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
	} else {
		free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
				arch_timer_evt);
		if (arch_timer_ppi[PHYS_NONSECURE_PPI])
			free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
					arch_timer_evt);
	}

out_free:
	free_percpu(arch_timer_evt);
out:
	return err;
}

static const struct of_device_id arch_timer_of_match[] __initconst = {
	{ .compatible	= "arm,armv7-timer",	},
	{},
};

int __init arch_timer_of_register(void)
{
	struct device_node *np;
	u32 freq;
	int i;

	np = of_find_matching_node(NULL, arch_timer_of_match);
	if (!np) {
		pr_err("arch_timer: can't find DT node\n");
		return -ENODEV;
	}

	/* Try to determine the frequency from the device tree or CNTFRQ */
	if (!of_property_read_u32(np, "clock-frequency", &freq))
		arch_timer_rate = freq;

	for (i = PHYS_SECURE_PPI; i < MAX_TIMER_PPI; i++)
		arch_timer_ppi[i] = irq_of_parse_and_map(np, i);

	/*
	 * If no interrupt provided for virtual timer, we'll have to
	 * stick to the physical timer. It'd better be accessible...
	 */
	if (!arch_timer_ppi[VIRT_PPI]) {
		arch_timer_use_virtual = false;

		if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
		    !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
			pr_warn("arch_timer: No interrupt available, giving up\n");
			return -EINVAL;
		}
	}

	return arch_timer_register();
}

int __init arch_timer_sched_clock_init(void)
{
	u32 (*cnt32)(void);
	int err;

	err = arch_timer_available();
	if (err)
		return err;

	if (arch_timer_use_virtual)
		cnt32 = arch_counter_get_cntvct32;
	else
		cnt32 = arch_counter_get_cntpct32;

	setup_sched_clock(cnt32, 32, arch_timer_rate);
	return 0;
}