lib/vdso/gettimeofday.c

// SPDX-License-Identifier: GPL-2.0
/*
 * Generic userspace implementations of gettimeofday() and similar.
 */
#include <linux/compiler.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>
#include <linux/hrtimer_defs.h>
#include <vdso/datapage.h>
#include <vdso/helpers.h>

/*
 * The generic vDSO implementation requires that gettimeofday.h
 * provides:
 * - __arch_get_vdso_data(): to get the vdso datapage.
 * - __arch_get_hw_counter(): to get the hw counter based on the
 *   clock_mode.
 * - gettimeofday_fallback(): fallback for gettimeofday.
 * - clock_gettime_fallback(): fallback for clock_gettime.
 * - clock_getres_fallback(): fallback for clock_getres.
 */
#ifdef ENABLE_COMPAT_VDSO
#include <asm/vdso/compat_gettimeofday.h>
#else
#include <asm/vdso/gettimeofday.h>
#endif /* ENABLE_COMPAT_VDSO */
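
/*
 * Illustrative sketch only (not part of this file): the arch header pulled
 * in above is expected to provide inline helpers roughly along these lines.
 * Exact signatures, return types and additional helpers vary per
 * architecture, so treat the prototypes below as an assumption:
 *
 *	static __always_inline const struct vdso_data *__arch_get_vdso_data(void);
 *	static __always_inline u64 __arch_get_hw_counter(s32 clock_mode);
 *	static __always_inline long gettimeofday_fallback(struct __kernel_old_timeval *tv,
 *							  struct timezone *tz);
 *	static __always_inline long clock_gettime_fallback(clockid_t clock,
 *							   struct __kernel_timespec *ts);
 *	static __always_inline long clock_getres_fallback(clockid_t clock,
 *							   struct __kernel_timespec *ts);
 */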

#ifndef vdso_calc_delta
/*
 * Default implementation which works for all sane clocksources. That
 * obviously excludes x86/TSC.
 */
static __always_inline
u64 vdso_calc_delta(u64 cycles, u64 last, u64 mask, u32 mult)
{
	return ((cycles - last) & mask) * mult;
}
#endif
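
/*
 * Note: the "& mask" makes the subtraction wrap correctly for clocksources
 * narrower than 64 bit. Architectures which need a different delta
 * calculation (the x86/TSC case mentioned above) define their own
 * vdso_calc_delta() and the #ifndef guard then skips this default.
 */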

#ifdef CONFIG_TIME_NS
static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct timens_offset *offs = &vdns->offset[clk];
	const struct vdso_timestamp *vdso_ts;
	u64 cycles, last, ns;
	u32 seq;
	s64 sec;

	if (clk != CLOCK_MONOTONIC_RAW)
		vd = &vd[CS_HRES_COARSE];
	else
		vd = &vd[CS_RAW];
	vdso_ts = &vd->basetime[clk];

	do {
		seq = vdso_read_begin(vd);
		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
		if (unlikely((s64)cycles < 0))
			return -1;

		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	ns += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}
#else
static __always_inline const struct vdso_data *__arch_get_timens_vdso_data(void)
{
	return NULL;
}

static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
			  struct __kernel_timespec *ts)
{
	return -EINVAL;
}
#endif
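
/*
 * With CONFIG_TIME_NS disabled, the IS_ENABLED() checks in do_hres() and
 * do_coarse() compile the time namespace paths out; the stubs above exist
 * only to keep the code building in that configuration.
 */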

static __always_inline int do_hres(const struct vdso_data *vd, clockid_t clk,
				   struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u64 cycles, last, sec, ns;
	u32 seq;

	do {
		/*
		 * Open coded to handle VCLOCK_TIMENS. Time namespace
		 * enabled tasks have a special VVAR page installed which
		 * has vd->seq set to 1 and vd->clock_mode set to
		 * VCLOCK_TIMENS. For non time namespace affected tasks
		 * this does not affect performance because if vd->seq is
		 * odd, i.e. a concurrent update is in progress, the extra
		 * check for vd->clock_mode is just a few extra
		 * instructions while spin waiting for vd->seq to become
		 * even again.
		 */
		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VCLOCK_TIMENS)
				return do_hres_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		cycles = __arch_get_hw_counter(vd->clock_mode);
		ns = vdso_ts->nsec;
		last = vd->cycle_last;
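		/*
		 * A negative value from __arch_get_hw_counter() means the
		 * clocksource is not usable from the vDSO (e.g. clock_mode
		 * is VCLOCK_NONE); return -1 so the caller falls back to
		 * the real syscall.
		 */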
		if (unlikely((s64)cycles < 0))
			return -1;

		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
		ns >>= vd->shift;
		sec = vdso_ts->sec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

#ifdef CONFIG_TIME_NS
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_timens_vdso_data();
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	const struct timens_offset *offs = &vdns->offset[clk];
	u64 nsec;
	s64 sec;
	s32 seq;

	do {
		seq = vdso_read_begin(vd);
		sec = vdso_ts->sec;
		nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	/* Add the namespace offset */
	sec += offs->sec;
	nsec += offs->nsec;

	/*
	 * Do this outside the loop: a race inside the loop could result
	 * in __iter_div_u64_rem() being extremely slow.
	 */
	ts->tv_sec = sec + __iter_div_u64_rem(nsec, NSEC_PER_SEC, &nsec);
	ts->tv_nsec = nsec;
	return 0;
}
#else
static int do_coarse_timens(const struct vdso_data *vdns, clockid_t clk,
			    struct __kernel_timespec *ts)
{
	return -1;
}
#endif
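
/*
 * The coarse clocks only hand out the timestamps updated at every timer
 * tick, so neither a hardware counter read nor a mult/shift conversion is
 * required.
 */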

static __always_inline int do_coarse(const struct vdso_data *vd, clockid_t clk,
				     struct __kernel_timespec *ts)
{
	const struct vdso_timestamp *vdso_ts = &vd->basetime[clk];
	u32 seq;

	do {
		/*
		 * Open coded to handle VCLOCK_TIMENS. See comment in
		 * do_hres().
		 */
		while ((seq = READ_ONCE(vd->seq)) & 1) {
			if (IS_ENABLED(CONFIG_TIME_NS) &&
			    vd->clock_mode == VCLOCK_TIMENS)
				return do_coarse_timens(vd, clk, ts);
			cpu_relax();
		}
		smp_rmb();

		ts->tv_sec = vdso_ts->sec;
		ts->tv_nsec = vdso_ts->nsec;
	} while (unlikely(vdso_read_retry(vd, seq)));

	return 0;
}

static __maybe_unused int
__cvdso_clock_gettime_common(clockid_t clock, struct __kernel_timespec *ts)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (likely(msk & VDSO_HRES))
		vd = &vd[CS_HRES_COARSE];
	else if (msk & VDSO_COARSE)
		return do_coarse(&vd[CS_HRES_COARSE], clock, ts);
	else if (msk & VDSO_RAW)
		vd = &vd[CS_RAW];
	else
		return -1;

	return do_hres(vd, clock, ts);
}
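
/*
 * The common helpers return a non-zero value for anything the vDSO cannot
 * handle; the wrappers below then invoke the matching syscall fallback, so
 * that internal error value never reaches userspace.
 */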

static __maybe_unused int
__cvdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
{
	int ret = __cvdso_clock_gettime_common(clock, ts);

	if (unlikely(ret))
		return clock_gettime_fallback(clock, ts);
	return 0;
}

#ifdef BUILD_VDSO32
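/*
 * 32-bit (compat) variant: the result is computed in the 64 bit
 * __kernel_timespec and then narrowed into the legacy old_timespec32
 * layout expected by 32-bit userspace.
 */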
static __maybe_unused int
__cvdso_clock_gettime32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_gettime_common(clock, &ts);

	if (unlikely(ret))
		return clock_gettime32_fallback(clock, res);

	/* For ret == 0 */
	res->tv_sec = ts.tv_sec;
	res->tv_nsec = ts.tv_nsec;

	return ret;
}
#endif /* BUILD_VDSO32 */

static __maybe_unused int
__cvdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	const struct vdso_data *vd = __arch_get_vdso_data();

	if (likely(tv != NULL)) {
		struct __kernel_timespec ts;

		if (do_hres(&vd[CS_HRES_COARSE], CLOCK_REALTIME, &ts))
			return gettimeofday_fallback(tv, tz);

		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = (u32)ts.tv_nsec / NSEC_PER_USEC;
	}
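
	/*
	 * The timezone data lives in the real vDSO datapage; time namespace
	 * tasks, whose __arch_get_vdso_data() points at the namespace VVAR
	 * page, have to redirect to it first.
	 */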
	if (unlikely(tz != NULL)) {
		if (IS_ENABLED(CONFIG_TIME_NS) &&
		    vd->clock_mode == VCLOCK_TIMENS)
			vd = __arch_get_timens_vdso_data();

		tz->tz_minuteswest = vd[CS_HRES_COARSE].tz_minuteswest;
		tz->tz_dsttime = vd[CS_HRES_COARSE].tz_dsttime;
	}

	return 0;
}

#ifdef VDSO_HAS_TIME
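/*
 * time() only reports CLOCK_REALTIME seconds, so this just reads the base
 * seconds from the datapage (the real one for time namespace tasks) rather
 * than going through do_hres().
 */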
static __maybe_unused __kernel_old_time_t __cvdso_time(__kernel_old_time_t *time)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	__kernel_old_time_t t;

	if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
		vd = __arch_get_timens_vdso_data();

	t = READ_ONCE(vd[CS_HRES_COARSE].basetime[CLOCK_REALTIME].sec);

	if (time)
		*time = t;

	return t;
}
#endif /* VDSO_HAS_TIME */

#ifdef VDSO_HAS_CLOCK_GETRES
static __maybe_unused
int __cvdso_clock_getres_common(clockid_t clock, struct __kernel_timespec *res)
{
	const struct vdso_data *vd = __arch_get_vdso_data();
	u32 msk;
	u64 ns;

	/* Check for negative values or invalid clocks */
	if (unlikely((u32) clock >= MAX_CLOCKS))
		return -1;

	if (IS_ENABLED(CONFIG_TIME_NS) && vd->clock_mode == VCLOCK_TIMENS)
		vd = __arch_get_timens_vdso_data();
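
	/*
	 * The reported resolution does not depend on the time namespace;
	 * the redirect above is only needed because hrtimer_res has to be
	 * read from the real datapage.
	 */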

	/*
	 * Convert the clockid to a bitmask and use it to check which
	 * clocks are handled in the VDSO directly.
	 */
	msk = 1U << clock;
	if (msk & (VDSO_HRES | VDSO_RAW)) {
		/*
		 * Preserves the behaviour of posix_get_hrtimer_res().
		 */
		ns = READ_ONCE(vd[CS_HRES_COARSE].hrtimer_res);
	} else if (msk & VDSO_COARSE) {
		/*
		 * Preserves the behaviour of posix_get_coarse_res().
		 */
		ns = LOW_RES_NSEC;
	} else {
		return -1;
	}

	if (likely(res)) {
		res->tv_sec = 0;
		res->tv_nsec = ns;
	}
	return 0;
}

static __maybe_unused
int __cvdso_clock_getres(clockid_t clock, struct __kernel_timespec *res)
{
	int ret = __cvdso_clock_getres_common(clock, res);

	if (unlikely(ret))
		return clock_getres_fallback(clock, res);
	return 0;
}

#ifdef BUILD_VDSO32
static __maybe_unused int
__cvdso_clock_getres_time32(clockid_t clock, struct old_timespec32 *res)
{
	struct __kernel_timespec ts;
	int ret;

	ret = __cvdso_clock_getres_common(clock, &ts);

	if (unlikely(ret))
		return clock_getres32_fallback(clock, res);

	if (likely(res)) {
		res->tv_sec = ts.tv_sec;
		res->tv_nsec = ts.tv_nsec;
	}
	return ret;
}
#endif /* BUILD_VDSO32 */
#endif /* VDSO_HAS_CLOCK_GETRES */