// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <vdso/datapage.h>
#include <vdso/helpers.h>

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
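
/*
 * Worked example (illustrative only, not a real clocksource): for a
 * hypothetical 10 MHz counter each cycle is 100 ns.  With shift = 8
 * the timekeeping core would pick mult = 100 << 8 = 25600, so a delta
 * of 3 cycles converts as:
 *
 *	ns = (3 * 25600) >> 8 = 300
 *
 * The mask handles counters narrower than 64 bit which wrap around.
 */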

#ifndef vdso_shift_ns
static __always_inline u64 vdso_shift_ns(u64 ns, u32 shift)
{
	return ns >> shift;
}
#endif

#ifndef __arch_vdso_hres_capable
static inline bool __arch_vdso_hres_capable(void)
{
	return true;
}
#endif

#ifndef vdso_clocksource_ok
static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
{
	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
}
#endif

#ifndef vdso_cycles_ok
static inline bool vdso_cycles_ok(u64 cycles)
{
	return true;
}
#endif

#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
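
/*
 * Worked example (illustrative numbers only): with a namespace offset
 * of offs->sec = 5, offs->nsec = 900000000 and a host reading of
 * sec = 100, ns = 200000000, the sums are sec = 105, ns = 1100000000.
 * __iter_div_u64_rem() then normalizes this to tv_sec = 106,
 * tv_nsec = 100000000.  The division is iterative, so it runs exactly
 * once, after the retry loop has settled.
 */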

#else
static __always_inline
const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	/* Allows the high resolution parts to be compiled out */
	if (!__arch_vdso_hres_capable())
		return -1;

	do {
		/*
		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
		 * this does not affect performance because if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		if (unlikely(!vdso_clocksource_ok(vd)))
			return -1;

		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
		if (unlikely(!vdso_cycles_ok(cycles)))
			return -1;
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns = vdso_shift_ns(ns, vd->shift);
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
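
/*
 * The loop above is the standard seqcount read pattern with the timens
 * check folded into the "writer in progress" spin.  A minimal generic
 * sketch of the pattern (p and its fields are hypothetical names):
 *
 *	do {
 *		while ((seq = READ_ONCE(p->seq)) & 1)
 *			cpu_relax();		// odd: update in progress
 *		smp_rmb();			// order seq read before data
 *		snap = p->data;			// speculative snapshot
 *		smp_rmb();			// order data before re-check
 *	} while (READ_ONCE(p->seq) != seq);	// retry if a writer ran
 *
 * vdso_read_begin() and vdso_read_retry() encapsulate the begin and
 * retry halves, including the required barriers.
 */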

#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	u32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}

#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif

static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VDSO_CLOCK_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}
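
/*
 * Userspace view (illustrative sketch, not part of this file): the
 * coarse clocks skip the hardware counter read entirely and return the
 * timestamp recorded at the last tick, trading resolution for speed:
 *
 *	struct timespec ts;
 *	clock_gettime(CLOCK_REALTIME_COARSE, &ts);	// tick granularity
 *	clock_gettime(CLOCK_REALTIME, &ts);		// ns resolution
 */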

static __always_inline int
__cvdso_clock_gettime_common(const struct vdso_data *vd, clockid_t clock,
			     struct __kernel_timespec *ts)
{
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
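
/*
 * Example (illustrative only): CLOCK_MONOTONIC is clockid 1, so
 * msk = 1U << 1 = 0x02, which lies in the VDSO_HRES mask and is served
 * by do_hres() from the CS_HRES_COARSE data page.  A clockid outside
 * all three masks returns -1, and the callers below then fall back to
 * the real syscall.
 */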

static __maybe_unused int
__cvdso_clock_gettime_data(const struct vdso_data *vd, clockid_t clock,
			   struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(vd, clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	return __cvdso_clock_gettime_data(__arch_get_vdso_data(), clock, ts);
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_gettime32_data(const struct vdso_data *vd, clockid_t clock,
			     struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}

static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_gettime32_data(__arch_get_vdso_data(), clock, res);
}
#endif /* BUILD_VDSO32 */

static __maybe_unused int
__cvdso_gettimeofday_data(const struct vdso_data *vd,
			  struct __kernel_old_timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}

	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}
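
/*
 * Usage sketch (illustrative, not part of this file): the C library
 * resolves gettimeofday() to the vDSO symbol, so the call below
 * normally never enters the kernel:
 *
 *	#include <sys/time.h>
 *
 *	struct timeval tv;
 *	gettimeofday(&tv, NULL);	// served by the vDSO fast path
 */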

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	return __cvdso_gettimeofday_data(__arch_get_vdso_data(), tv, tz);
}

#ifdef VDSO_HAS_TIME
static __maybe_unused __kernel_old_time_t
__cvdso_time_data(const struct vdso_data *vd, __kernel_old_time_t *time)
{
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
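
/*
 * Design note: time() only reports whole seconds, so a plain
 * READ_ONCE() of the CLOCK_REALTIME seconds field is good enough here;
 * racing with a concurrent update can at worst return a value that is
 * one second stale, which the time() interface tolerates anyway.  No
 * seqcount retry loop is needed.
 */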

static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	return __cvdso_time_data(__arch_get_vdso_data(), time);
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(const struct vdso_data *vd, clockid_t clock,
				struct __kernel_timespec *res)
{
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) &&
	    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
		vd = __arch_get_timens_vdso_data();

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}
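
/*
 * Example (illustrative values): with high resolution timers enabled,
 * hrtimer_res is typically 1 ns, so clock_getres(CLOCK_MONOTONIC)
 * reports { .tv_sec = 0, .tv_nsec = 1 }.  The coarse clocks report the
 * tick length instead, e.g. 10000000 ns on a hypothetical CONFIG_HZ=100
 * build, since LOW_RES_NSEC corresponds to one tick.
 */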

static __maybe_unused
int __cvdso_clock_getres_data(const struct vdso_data *vd, clockid_t clock,
			      struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(vd, clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	return __cvdso_clock_getres_data(__arch_get_vdso_data(), clock, res);
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32_data(const struct vdso_data *vd, clockid_t clock,
				 struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(vd, clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}

static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	return __cvdso_clock_getres_time32_data(__arch_get_vdso_data(),
						clock, res);
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */