/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#include <uapi/linux/time.h>

#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
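/*
 * BUILD_VDSO32_64 is presumably defined when the 32-bit vDSO is built as
 * part of a 64-bit kernel; gtod_long_t then has to stay 64 bits wide so
 * that the layout the 32-bit vDSO sees matches what the 64-bit kernel
 * writes into the shared data.
 */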
/*
 * There is one of these objects in the vvar page for each
 * vDSO-accelerated clockid.  For high-resolution clocks, this encodes
 * the time corresponding to vsyscall_gtod_data.cycle_last.  For coarse
 * clocks, this encodes the actual time.
 *
 * Note that, for high-resolution clocks, nsec is left-shifted by
 * vsyscall_gtod_data.shift.
 */
struct vgtod_ts {
	u64		sec;
	u64		nsec;
};
#define VGTOD_BASES	(CLOCK_TAI + 1)
#define VGTOD_HRES	(BIT(CLOCK_REALTIME) | BIT(CLOCK_MONOTONIC) | BIT(CLOCK_TAI))
#define VGTOD_COARSE	(BIT(CLOCK_REALTIME_COARSE) | BIT(CLOCK_MONOTONIC_COARSE))
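/*
 * These look like clockid bitmasks: a bit set in VGTOD_HRES means the vDSO
 * serves that clock via the high-resolution path (reading the clocksource
 * and scaling with mult/shift), a bit set in VGTOD_COARSE means it returns
 * the precomputed base time as-is, and anything else falls back to the
 * real system call.
 */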
/*
 * vsyscall_gtod_data will be accessed by 32-bit and 64-bit code at the
 * same time, so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned int	seq;

	int		vclock_mode;
	u64		cycle_last;
	u64		mask;
	u32		mult;
	u32		shift;

	struct vgtod_ts	basetime[VGTOD_BASES];

	int		tz_minuteswest;
	int		tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
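/*
 * The single instance presumably lives in the vvar page (see the comment
 * above struct vgtod_ts) and is mapped read-only into user space, so the
 * vDSO can read it without entering the kernel.
 */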
extern int vclocks_used;
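/*
 * vclocks_used appears to be a bitmask with one bit per VCLOCK_* mode,
 * recording which vDSO clock modes have ever been enabled; callers can use
 * vclock_was_used() to skip work for modes no task could have relied on.
 */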
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
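/*
 * Reader side of the open-coded sequence counter: an odd seq value means a
 * writer is mid-update, so gtod_read_begin() spins until seq is even.  The
 * smp_rmb() orders the seq load before the data loads that follow it.
 */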
static inline unsigned int gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned int ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}
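/*
 * gtod_read_retry() returns nonzero if the sequence count changed while the
 * reader was copying data, i.e. the snapshot may be torn and must be
 * retried.  The smp_rmb() orders the reader's data loads before the
 * re-check of seq.
 */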
static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned int start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
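/*
 * A sketch of the typical reader loop (illustrative only, not copied from
 * the vDSO sources):
 *
 *	unsigned int seq;
 *
 *	do {
 *		seq = gtod_read_begin(&vsyscall_gtod_data);
 *		... copy the fields that are needed ...
 *	} while (unlikely(gtod_read_retry(&vsyscall_gtod_data, seq)));
 */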
static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
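/*
 * Writer side: gtod_write_begin() makes seq odd before the timekeeping
 * update and gtod_write_end() makes it even again afterwards; the smp_wmb()
 * calls order the seq stores against the data stores in between.  Writers
 * are expected not to run concurrently with each other.
 */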
#endif /* _ASM_X86_VGTOD_H */