1 // SPDX-License-Identifier: GPL-2.0
3 * Generic userspace implementations of gettimeofday() and similar.
5 #include <linux/compiler.h>
6 #include <linux/math64.h>
7 #include <linux/time.h>
8 #include <linux/kernel.h>
9 #include <linux/hrtimer_defs.h>
10 #include <vdso/datapage.h>
11 #include <vdso/helpers.h>
14 * The generic vDSO implementation requires that gettimeofday.h
16 * - __arch_get_vdso_data(): to get the vdso datapage.
17 * - __arch_get_hw_counter(): to get the hw counter based on the
19 * - gettimeofday_fallback(): fallback for gettimeofday.
20 * - clock_gettime_fallback(): fallback for clock_gettime.
21 * - clock_getres_fallback(): fallback for clock_getres.
23 #ifdef ENABLE_COMPAT_VDSO
24 #include <asm/vdso/compat_gettimeofday.h>
26 #include <asm/vdso/gettimeofday.h>
27 #endif /* ENABLE_COMPAT_VDSO */
29 #ifndef vdso_calc_delta
31 * Default implementation which works for all sane clocksources. That
32 * obviously excludes x86/TSC.
34 static __always_inline
35 u64
vdso_calc_delta(u64 cycles
, u64 last
, u64 mask
, u32 mult
)
37 return ((cycles
- last
) & mask
) * mult
;
41 static int do_hres(const struct vdso_data
*vd
, clockid_t clk
,
42 struct __kernel_timespec
*ts
)
44 const struct vdso_timestamp
*vdso_ts
= &vd
->basetime
[clk
];
45 u64 cycles
, last
, sec
, ns
;
49 seq
= vdso_read_begin(vd
);
50 cycles
= __arch_get_hw_counter(vd
->clock_mode
);
52 last
= vd
->cycle_last
;
53 if (unlikely((s64
)cycles
< 0))
56 ns
+= vdso_calc_delta(cycles
, last
, vd
->mask
, vd
->mult
);
59 } while (unlikely(vdso_read_retry(vd
, seq
)));
62 * Do this outside the loop: a race inside the loop could result
63 * in __iter_div_u64_rem() being extremely slow.
65 ts
->tv_sec
= sec
+ __iter_div_u64_rem(ns
, NSEC_PER_SEC
, &ns
);
71 static void do_coarse(const struct vdso_data
*vd
, clockid_t clk
,
72 struct __kernel_timespec
*ts
)
74 const struct vdso_timestamp
*vdso_ts
= &vd
->basetime
[clk
];
78 seq
= vdso_read_begin(vd
);
79 ts
->tv_sec
= vdso_ts
->sec
;
80 ts
->tv_nsec
= vdso_ts
->nsec
;
81 } while (unlikely(vdso_read_retry(vd
, seq
)));
84 static __maybe_unused
int
85 __cvdso_clock_gettime_common(clockid_t clock
, struct __kernel_timespec
*ts
)
87 const struct vdso_data
*vd
= __arch_get_vdso_data();
90 /* Check for negative values or invalid clocks */
91 if (unlikely((u32
) clock
>= MAX_CLOCKS
))
95 * Convert the clockid to a bitmask and use it to check which
96 * clocks are handled in the VDSO directly.
99 if (likely(msk
& VDSO_HRES
)) {
100 return do_hres(&vd
[CS_HRES_COARSE
], clock
, ts
);
101 } else if (msk
& VDSO_COARSE
) {
102 do_coarse(&vd
[CS_HRES_COARSE
], clock
, ts
);
104 } else if (msk
& VDSO_RAW
) {
105 return do_hres(&vd
[CS_RAW
], clock
, ts
);
110 static __maybe_unused
int
111 __cvdso_clock_gettime(clockid_t clock
, struct __kernel_timespec
*ts
)
113 int ret
= __cvdso_clock_gettime_common(clock
, ts
);
116 return clock_gettime_fallback(clock
, ts
);
120 static __maybe_unused
int
121 __cvdso_clock_gettime32(clockid_t clock
, struct old_timespec32
*res
)
123 struct __kernel_timespec ts
;
126 ret
= __cvdso_clock_gettime_common(clock
, &ts
);
128 #ifdef VDSO_HAS_32BIT_FALLBACK
130 return clock_gettime32_fallback(clock
, res
);
133 ret
= clock_gettime_fallback(clock
, &ts
);
137 res
->tv_sec
= ts
.tv_sec
;
138 res
->tv_nsec
= ts
.tv_nsec
;
143 static __maybe_unused
int
144 __cvdso_gettimeofday(struct __kernel_old_timeval
*tv
, struct timezone
*tz
)
146 const struct vdso_data
*vd
= __arch_get_vdso_data();
148 if (likely(tv
!= NULL
)) {
149 struct __kernel_timespec ts
;
151 if (do_hres(&vd
[CS_HRES_COARSE
], CLOCK_REALTIME
, &ts
))
152 return gettimeofday_fallback(tv
, tz
);
154 tv
->tv_sec
= ts
.tv_sec
;
155 tv
->tv_usec
= (u32
)ts
.tv_nsec
/ NSEC_PER_USEC
;
158 if (unlikely(tz
!= NULL
)) {
159 tz
->tz_minuteswest
= vd
[CS_HRES_COARSE
].tz_minuteswest
;
160 tz
->tz_dsttime
= vd
[CS_HRES_COARSE
].tz_dsttime
;
#ifdef VDSO_HAS_TIME
/*
 * time() implementation: a single lockless READ_ONCE() of the coarse
 * CLOCK_REALTIME seconds is sufficient for one-second granularity;
 * no seqcount loop is needed. Stores through @time when non-NULL.
 */
static __maybe_unused time_t __cvdso_time(time_t *time)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	time_t t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
#endif /* VDSO_HAS_TIME */
179 #ifdef VDSO_HAS_CLOCK_GETRES
180 static __maybe_unused
181 int __cvdso_clock_getres_common(clockid_t clock
, struct __kernel_timespec
*res
)
183 const struct vdso_data
*vd
= __arch_get_vdso_data();
188 /* Check for negative values or invalid clocks */
189 if (unlikely((u32
) clock
>= MAX_CLOCKS
))
192 hrtimer_res
= READ_ONCE(vd
[CS_HRES_COARSE
].hrtimer_res
);
194 * Convert the clockid to a bitmask and use it to check which
195 * clocks are handled in the VDSO directly.
198 if (msk
& VDSO_HRES
) {
200 * Preserves the behaviour of posix_get_hrtimer_res().
203 } else if (msk
& VDSO_COARSE
) {
205 * Preserves the behaviour of posix_get_coarse_res().
208 } else if (msk
& VDSO_RAW
) {
210 * Preserves the behaviour of posix_get_hrtimer_res().
223 int __cvdso_clock_getres(clockid_t clock
, struct __kernel_timespec
*res
)
225 int ret
= __cvdso_clock_getres_common(clock
, res
);
228 return clock_getres_fallback(clock
, res
);
232 static __maybe_unused
int
233 __cvdso_clock_getres_time32(clockid_t clock
, struct old_timespec32
*res
)
235 struct __kernel_timespec ts
;
238 ret
= __cvdso_clock_getres_common(clock
, &ts
);
240 #ifdef VDSO_HAS_32BIT_FALLBACK
242 return clock_getres32_fallback(clock
, res
);
245 ret
= clock_getres_fallback(clock
, &ts
);
249 res
->tv_sec
= ts
.tv_sec
;
250 res
->tv_nsec
= ts
.tv_nsec
;
254 #endif /* VDSO_HAS_CLOCK_GETRES */