1 /* SPDX-License-Identifier: GPL-2.0-only */
/*
 * arch/arm64/include/asm/arch_timer.h
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */
8 #ifndef __ASM_ARCH_TIMER_H
9 #define __ASM_ARCH_TIMER_H
11 #include <asm/barrier.h>
12 #include <asm/hwcap.h>
13 #include <asm/sysreg.h>
15 #include <linux/bug.h>
16 #include <linux/init.h>
17 #include <linux/jump_label.h>
18 #include <linux/smp.h>
19 #include <linux/types.h>
21 #include <clocksource/arm_arch_timer.h>
23 #if IS_ENABLED(CONFIG_ARM_ARCH_TIMER_OOL_WORKAROUND)
24 #define has_erratum_handler(h) \
26 const struct arch_timer_erratum_workaround *__wa; \
27 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
31 #define erratum_handler(h) \
33 const struct arch_timer_erratum_workaround *__wa; \
34 __wa = __this_cpu_read(timer_unstable_counter_workaround); \
35 (__wa && __wa->h) ? __wa->h : arch_timer_##h; \
39 #define has_erratum_handler(h) false
40 #define erratum_handler(h) (arch_timer_##h)
/* How a timer erratum workaround entry is matched to the running system. */
enum arch_timer_erratum_match_type {
	ate_match_dt,			/* NOTE(review): reconstructed from numbering gap — confirm upstream */
	ate_match_local_cap_id,
	ate_match_acpi_oem_info,
};

struct clock_event_device;
51 struct arch_timer_erratum_workaround
{
52 enum arch_timer_erratum_match_type match_type
;
55 u32 (*read_cntp_tval_el0
)(void);
56 u32 (*read_cntv_tval_el0
)(void);
57 u64 (*read_cntpct_el0
)(void);
58 u64 (*read_cntvct_el0
)(void);
59 int (*set_next_event_phys
)(unsigned long, struct clock_event_device
*);
60 int (*set_next_event_virt
)(unsigned long, struct clock_event_device
*);
61 bool disable_compat_vdso
;
64 DECLARE_PER_CPU(const struct arch_timer_erratum_workaround
*,
65 timer_unstable_counter_workaround
);
67 /* inline sysreg accessors that make erratum_handler() work */
68 static inline notrace u32
arch_timer_read_cntp_tval_el0(void)
70 return read_sysreg(cntp_tval_el0
);
73 static inline notrace u32
arch_timer_read_cntv_tval_el0(void)
75 return read_sysreg(cntv_tval_el0
);
78 static inline notrace u64
arch_timer_read_cntpct_el0(void)
80 return read_sysreg(cntpct_el0
);
83 static inline notrace u64
arch_timer_read_cntvct_el0(void)
85 return read_sysreg(cntvct_el0
);
/*
 * Read @reg via the erratum handler (or default accessor), with preemption
 * disabled so the per-CPU workaround lookup and the register read happen
 * on the same CPU.
 */
#define arch_timer_reg_read_stable(reg)					\
	({								\
		u64 _val;						\
									\
		preempt_disable_notrace();				\
		_val = erratum_handler(read_ ## reg)();			\
		preempt_enable_notrace();				\
									\
		_val;							\
	})
100 * These register accessors are marked inline so the compiler can
101 * nicely work out which register we want, and chuck away the rest of
104 static __always_inline
105 void arch_timer_reg_write_cp15(int access
, enum arch_timer_reg reg
, u32 val
)
107 if (access
== ARCH_TIMER_PHYS_ACCESS
) {
109 case ARCH_TIMER_REG_CTRL
:
110 write_sysreg(val
, cntp_ctl_el0
);
112 case ARCH_TIMER_REG_TVAL
:
113 write_sysreg(val
, cntp_tval_el0
);
116 } else if (access
== ARCH_TIMER_VIRT_ACCESS
) {
118 case ARCH_TIMER_REG_CTRL
:
119 write_sysreg(val
, cntv_ctl_el0
);
121 case ARCH_TIMER_REG_TVAL
:
122 write_sysreg(val
, cntv_tval_el0
);
130 static __always_inline
131 u32
arch_timer_reg_read_cp15(int access
, enum arch_timer_reg reg
)
133 if (access
== ARCH_TIMER_PHYS_ACCESS
) {
135 case ARCH_TIMER_REG_CTRL
:
136 return read_sysreg(cntp_ctl_el0
);
137 case ARCH_TIMER_REG_TVAL
:
138 return arch_timer_reg_read_stable(cntp_tval_el0
);
140 } else if (access
== ARCH_TIMER_VIRT_ACCESS
) {
142 case ARCH_TIMER_REG_CTRL
:
143 return read_sysreg(cntv_ctl_el0
);
144 case ARCH_TIMER_REG_TVAL
:
145 return arch_timer_reg_read_stable(cntv_tval_el0
);
152 static inline u32
arch_timer_get_cntfrq(void)
154 return read_sysreg(cntfrq_el0
);
157 static inline u32
arch_timer_get_cntkctl(void)
159 return read_sysreg(cntkctl_el1
);
162 static inline void arch_timer_set_cntkctl(u32 cntkctl
)
164 write_sysreg(cntkctl
, cntkctl_el1
);
/*
 * Ensure that reads of the counter are treated the same as memory reads
 * for the purposes of ordering by subsequent memory barriers.
 *
 * This insanity brought to you by speculative system register reads,
 * out-of-order memory accesses, sequence locks and Thomas Gleixner.
 *
 * http://lists.infradead.org/pipermail/linux-arm-kernel/2019-February/631195.html
 */
#define arch_counter_enforce_ordering(val) do {				\
	u64 tmp, _val = (val);						\
									\
	asm volatile(							\
	"	eor	%0, %1, %1\n"					\
	"	add	%0, sp, %0\n"					\
	"	ldr	xzr, [%0]"					\
	: "=r" (tmp) : "r" (_val));					\
} while (0)
187 static __always_inline u64
__arch_counter_get_cntpct_stable(void)
192 cnt
= arch_timer_reg_read_stable(cntpct_el0
);
193 arch_counter_enforce_ordering(cnt
);
197 static __always_inline u64
__arch_counter_get_cntpct(void)
202 cnt
= read_sysreg(cntpct_el0
);
203 arch_counter_enforce_ordering(cnt
);
207 static __always_inline u64
__arch_counter_get_cntvct_stable(void)
212 cnt
= arch_timer_reg_read_stable(cntvct_el0
);
213 arch_counter_enforce_ordering(cnt
);
217 static __always_inline u64
__arch_counter_get_cntvct(void)
222 cnt
= read_sysreg(cntvct_el0
);
223 arch_counter_enforce_ordering(cnt
);
227 #undef arch_counter_enforce_ordering
/* No arch-specific init needed on arm64. */
static inline int arch_timer_arch_init(void)
{
	return 0;
}
234 static inline void arch_timer_set_evtstrm_feature(void)
236 cpu_set_named_feature(EVTSTRM
);
238 compat_elf_hwcap
|= COMPAT_HWCAP_EVTSTRM
;
242 static inline bool arch_timer_have_evtstrm_feature(void)
244 return cpu_have_named_feature(EVTSTRM
);