// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>
/*
 * The generic vDSO implementation requires that gettimeofday.h
 * provides:
 * - __arch_get_vdso_data(): to get the vdso datapage.
 * - __arch_get_hw_counter(): to get the hw counter based on the
 *   clock_mode.
 * - gettimeofday_fallback(): fallback for gettimeofday.
 * - clock_gettime_fallback(): fallback for clock_gettime.
 * - clock_getres_fallback(): fallback for clock_getres.
 *
 * An illustrative sketch of these hooks follows the include block below.
 */
#ifdef ENABLE_COMPAT_VDSO
#include <asm/vdso/compat_gettimeofday.h>
#else
#include <asm/vdso/gettimeofday.h>
#endif /* ENABLE_COMPAT_VDSO */
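/*
 * Illustrative sketch only, not part of this file: an architecture's
 * asm/vdso/gettimeofday.h is expected to provide the hooks listed above,
 * roughly along these lines. The data page symbol and the syscall
 * invocation are arch specific; treat this as an assumption, not a
 * reference implementation:
 *
 *	static __always_inline
 *	const struct vdso_data *__arch_get_vdso_data(void)
 *	{
 *		return _vdso_data;	// the mapped vDSO data page
 *	}
 *
 *	static __always_inline u64 __arch_get_hw_counter(s32 clock_mode)
 *	{
 *		// Read the hardware counter selected by clock_mode.
 *		// Returning a value with the sign bit set (see the
 *		// "(s64)cycles < 0" checks below) forces the syscall
 *		// fallback.
 *	}
 *
 *	static __always_inline
 *	long clock_gettime_fallback(clockid_t clock,
 *				    struct __kernel_timespec *ts)
 *	{
 *		// Invoke the real clock_gettime() syscall.
 *	}
 */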
#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
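/*
 * Together with the "ns >>= vd->shift" in the callers this implements the
 * standard clocksource conversion for the elapsed cycles:
 *
 *	delta_ns = ((cycles - last) & mask) * mult >> shift
 *
 * Worked example with illustrative numbers: mult = 4194304 (2^22) and
 * shift = 22 map one cycle to 4194304 >> 22 = 1 ns, i.e. a 1 GHz counter.
 */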
#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);
		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		if (unlikely((s64)cycles < 0))
			return -1;

		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif
static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	do {
		/*
		 * Open coded to handle VCLOCK_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VCLOCK_TIMENS. For non time namespace affected tasks
		 * this does not affect performance because if vd->seq is
		 * odd, i.e. a concurrent update is in progress the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VCLOCK_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		if (unlikely((s64)cycles < 0))
			return -1;

		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
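/*
 * For reference, the reader pattern open coded above is a standard
 * seqcount read; in simplified form (a sketch, ignoring the
 * VCLOCK_TIMENS check):
 *
 *	do {
 *		seq = vdso_read_begin(vd);	// wait for even vd->seq
 *		...				// snapshot vd contents
 *	} while (vdso_read_retry(vd, seq));	// reread if vd->seq moved
 *
 * The update side increments vd->seq before and after modifying the
 * data, so readers either see a consistent snapshot or retry.
 */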
#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif
static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VCLOCK_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VCLOCK_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}
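/*
 * Note: coarse clocks need no hardware counter read. They return the
 * timestamp of the last update, at tick granularity, straight from the
 * data page, which is why, unlike do_hres(), there is no fallback path
 * for a failed counter read.
 */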
static __maybe_unused int
__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
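/*
 * Worked example of the dispatch above: CLOCK_MONOTONIC (1) gives
 * msk = 0x02, which is part of VDSO_HRES, so do_hres() runs on the
 * CS_HRES_COARSE data. CLOCK_REALTIME_COARSE (5) gives msk = 0x20,
 * part of VDSO_COARSE. Clock ids outside the masks (e.g. the
 * per-process CPU clock) fall through to return -1, which makes the
 * callers take the syscall fallback.
 */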
static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}
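/*
 * Typical usage, an assumption about the arch side rather than anything
 * defined in this file: architectures expose thin vDSO entry points
 * that wrap these helpers, e.g.
 *
 *	int __vdso_clock_gettime(clockid_t clock,
 *				 struct __kernel_timespec *ts)
 *	{
 *		return __cvdso_clock_gettime(clock, ts);
 *	}
 */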
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}
#endif /* BUILD_VDSO32 */
static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		/* tv_nsec is < NSEC_PER_SEC, so the 32-bit division is safe */
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VCLOCK_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
#endif /* VDSO_HAS_TIME */
#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}
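/*
 * Resolution values above: hrtimer_res is the high resolution timer
 * resolution published in the vDSO data page, while coarse clocks
 * advance once per tick, hence LOW_RES_NSEC (TICK_NSEC, per
 * linux/hrtimer_defs.h).
 */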
static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}
#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */