/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2018 ARM Limited
 */
#ifndef __ASM_VDSO_GETTIMEOFDAY_H
#define __ASM_VDSO_GETTIMEOFDAY_H

#ifndef __ASSEMBLY__

#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/errno.h>

#include <asm/vdso/compat_barrier.h>

#define VDSO_HAS_CLOCK_GETRES		1

#define BUILD_VDSO32		1
20 static __always_inline
21 int gettimeofday_fallback(struct __kernel_old_timeval
*_tv
,
24 register struct timezone
*tz
asm("r1") = _tz
;
25 register struct __kernel_old_timeval
*tv
asm("r0") = _tv
;
26 register long ret
asm ("r0");
27 register long nr
asm("r7") = __NR_compat_gettimeofday
;
32 : "r" (tv
), "r" (tz
), "r" (nr
)
38 static __always_inline
39 long clock_gettime_fallback(clockid_t _clkid
, struct __kernel_timespec
*_ts
)
41 register struct __kernel_timespec
*ts
asm("r1") = _ts
;
42 register clockid_t clkid
asm("r0") = _clkid
;
43 register long ret
asm ("r0");
44 register long nr
asm("r7") = __NR_compat_clock_gettime64
;
49 : "r" (clkid
), "r" (ts
), "r" (nr
)
55 static __always_inline
56 long clock_gettime32_fallback(clockid_t _clkid
, struct old_timespec32
*_ts
)
58 register struct old_timespec32
*ts
asm("r1") = _ts
;
59 register clockid_t clkid
asm("r0") = _clkid
;
60 register long ret
asm ("r0");
61 register long nr
asm("r7") = __NR_compat_clock_gettime
;
66 : "r" (clkid
), "r" (ts
), "r" (nr
)
72 static __always_inline
73 int clock_getres_fallback(clockid_t _clkid
, struct __kernel_timespec
*_ts
)
75 register struct __kernel_timespec
*ts
asm("r1") = _ts
;
76 register clockid_t clkid
asm("r0") = _clkid
;
77 register long ret
asm ("r0");
78 register long nr
asm("r7") = __NR_compat_clock_getres_time64
;
83 : "r" (clkid
), "r" (ts
), "r" (nr
)
89 static __always_inline
90 int clock_getres32_fallback(clockid_t _clkid
, struct old_timespec32
*_ts
)
92 register struct old_timespec32
*ts
asm("r1") = _ts
;
93 register clockid_t clkid
asm("r0") = _clkid
;
94 register long ret
asm ("r0");
95 register long nr
asm("r7") = __NR_compat_clock_getres
;
100 : "r" (clkid
), "r" (ts
), "r" (nr
)
106 static __always_inline u64
__arch_get_hw_counter(s32 clock_mode
,
107 const struct vdso_data
*vd
)
112 * Core checks for mode already, so this raced against a concurrent
113 * update. Return something. Core will do another round and then
114 * see the mode change and fallback to the syscall.
116 if (clock_mode
!= VDSO_CLOCKMODE_ARCHTIMER
)
120 * This isb() is required to prevent that the counter value
124 asm volatile("mrrc p15, 1, %Q0, %R0, c14" : "=r" (res
));
126 * This isb() is required to prevent that the seq lock is
134 static __always_inline
const struct vdso_data
*__arch_get_vdso_data(void)
136 const struct vdso_data
*ret
;
139 * This simply puts &_vdso_data into ret. The reason why we don't use
140 * `ret = _vdso_data` is that the compiler tends to optimise this in a
141 * very suboptimal way: instead of keeping &_vdso_data in a register,
142 * it goes through a relocation almost every time _vdso_data must be
143 * accessed (even in subfunctions). This is both time and space
144 * consuming: each relocation uses a word in the code section, and it
145 * has to be loaded at runtime.
147 * This trick hides the assignment from the compiler. Since it cannot
148 * track where the pointer comes from, it will only use one relocation
149 * where __arch_get_vdso_data() is called, and then keep the result in
152 asm volatile("mov %0, %1" : "=r"(ret
) : "r"(_vdso_data
));
157 #ifdef CONFIG_TIME_NS
158 static __always_inline
const struct vdso_data
*__arch_get_timens_vdso_data(void)
160 const struct vdso_data
*ret
;
162 /* See __arch_get_vdso_data(). */
163 asm volatile("mov %0, %1" : "=r"(ret
) : "r"(_timens_data
));
169 static inline bool vdso_clocksource_ok(const struct vdso_data
*vd
)
171 return vd
->clock_mode
== VDSO_CLOCKMODE_ARCHTIMER
;
173 #define vdso_clocksource_ok vdso_clocksource_ok
#endif /* !__ASSEMBLY__ */

#endif /* __ASM_VDSO_GETTIMEOFDAY_H */