/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>
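
/*
 * BUILD_VDSO32_64 is defined when the 32-bit vDSO is built against a 64-bit
 * kernel; gtod_long_t is then forced to 64 bits so the 32-bit reader sees
 * the same vsyscall_gtod_data layout that the 64-bit kernel writes.
 */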
#ifdef BUILD_VDSO32_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	u64	cycle_last;
	u64	mask;
	u32	mult;
	u32	shift;

	/* open coded 'struct timespec' */
	u64		wall_time_snsec;
	gtod_long_t	wall_time_sec;
	gtod_long_t	monotonic_time_sec;
	u64		monotonic_time_snsec;
	gtod_long_t	wall_time_coarse_sec;
	gtod_long_t	wall_time_coarse_nsec;
	gtod_long_t	monotonic_time_coarse_sec;
	gtod_long_t	monotonic_time_coarse_nsec;

	int		tz_minuteswest;
	int		tz_dsttime;
};
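
/*
 * Note on the fields above (semantics inferred from the update/read sides):
 * the *_snsec fields hold nanoseconds left-shifted by 'shift', so a vDSO
 * reader can add the scaled clocksource delta and do a single right shift
 * at the end, while the *_coarse_* fields hold plain seconds/nanoseconds.
 */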
extern struct vsyscall_gtod_data vsyscall_gtod_data;

extern int vclocks_used;
static inline bool vclock_was_used(int vclock)
{
	return READ_ONCE(vclocks_used) & (1 << vclock);
}
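
/*
 * Illustrative usage (not from the original header): vclocks_used is a
 * bitmask indexed by the VCLOCK_* clock modes, so a typical query is
 *
 *	if (vclock_was_used(VCLOCK_TSC))
 *		...some task may still be using the TSC-based vDSO clock...
 */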

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = READ_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
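
/*
 * The helpers above implement a simple seqcount: gtod_write_begin() makes
 * 'seq' odd while an update is in flight and gtod_write_end() makes it even
 * again, with smp_wmb() ordering the counter against the data; readers spin
 * in gtod_read_begin() until 'seq' is even and retry via gtod_read_retry()
 * if it changed.  A minimal reader sketch (illustrative only, variable names
 * are not from this file; the real readers live in the vDSO code):
 *
 *	unsigned seq;
 *	gtod_long_t sec;
 *	u64 snsec;
 *
 *	do {
 *		seq   = gtod_read_begin(&vsyscall_gtod_data);
 *		sec   = vsyscall_gtod_data.wall_time_sec;
 *		snsec = vsyscall_gtod_data.wall_time_snsec;
 *	} while (unlikely(gtod_read_retry(&vsyscall_gtod_data, seq)));
 */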

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 *
	 * If RDPID is available, use it.
	 */
	alternative_io ("lsl %[seg],%[p]",
			".byte 0xf3,0x0f,0xc7,0xf8", /* RDPID %eax/rax */
			X86_FEATURE_RDPID,
			[p] "=a" (p), [seg] "r" (__PER_CPU_SEG));

	return p;
}
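
/*
 * Decoding sketch (illustrative, not from this header): the limit read by
 * __getcpu() packs the CPU number in the low 12 bits and the node number
 * above it, so the vDSO getcpu() side typically does
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu  = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */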

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */