#ifndef _ASM_X86_VGTOD_H
#define _ASM_X86_VGTOD_H

#include <linux/compiler.h>
#include <linux/clocksource.h>

#ifdef CONFIG_X86_64
typedef u64 gtod_long_t;
#else
typedef unsigned long gtod_long_t;
#endif
/*
 * vsyscall_gtod_data will be accessed by 32 and 64 bit code at the same time
 * so be careful when modifying this structure.
 */
struct vsyscall_gtod_data {
	unsigned seq;

	int vclock_mode;
	cycle_t cycle_last;
	cycle_t mask;
	u32 mult;
	u32 shift;

	/* open coded 'struct timespec' */
	u64 wall_time_snsec;
	gtod_long_t wall_time_sec;
	gtod_long_t monotonic_time_sec;
	u64 monotonic_time_snsec;
	gtod_long_t wall_time_coarse_sec;
	gtod_long_t wall_time_coarse_nsec;
	gtod_long_t monotonic_time_coarse_sec;
	gtod_long_t monotonic_time_coarse_nsec;
	int tz_minuteswest;
	int tz_dsttime;
};
extern struct vsyscall_gtod_data vsyscall_gtod_data;
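
/*
 * Note (an explanatory addition, not in the original header): the *_snsec
 * fields are assumed to hold "shifted" nanoseconds, i.e. nanoseconds
 * scaled left by 'shift', so a reader recovers plain nanoseconds as
 * (snsec >> shift), matching how the x86 vDSO time functions consume
 * these fields.
 */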

static inline unsigned gtod_read_begin(const struct vsyscall_gtod_data *s)
{
	unsigned ret;

repeat:
	ret = ACCESS_ONCE(s->seq);
	if (unlikely(ret & 1)) {
		cpu_relax();
		goto repeat;
	}
	smp_rmb();
	return ret;
}

static inline int gtod_read_retry(const struct vsyscall_gtod_data *s,
				  unsigned start)
{
	smp_rmb();
	return unlikely(s->seq != start);
}
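
/*
 * Illustrative reader pattern (a sketch of the usual seqcount loop, not
 * a verbatim copy of the vDSO code): retry the read section whenever the
 * sequence count changed, i.e. whenever a writer was active.
 *
 *	unsigned seq;
 *	gtod_long_t sec;
 *
 *	do {
 *		seq = gtod_read_begin(&vsyscall_gtod_data);
 *		sec = vsyscall_gtod_data.wall_time_sec;
 *	} while (gtod_read_retry(&vsyscall_gtod_data, seq));
 */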

static inline void gtod_write_begin(struct vsyscall_gtod_data *s)
{
	++s->seq;
	smp_wmb();
}

static inline void gtod_write_end(struct vsyscall_gtod_data *s)
{
	smp_wmb();
	++s->seq;
}
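
/*
 * Illustrative writer pattern (a sketch; new_sec is a hypothetical
 * value): gtod_write_begin() makes the sequence count odd so concurrent
 * readers retry, and gtod_write_end() makes it even again once the
 * update is published.
 *
 *	gtod_write_begin(&vsyscall_gtod_data);
 *	vsyscall_gtod_data.wall_time_sec = new_sec;
 *	gtod_write_end(&vsyscall_gtod_data);
 */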

#ifdef CONFIG_X86_64

#define VGETCPU_CPU_MASK 0xfff

static inline unsigned int __getcpu(void)
{
	unsigned int p;

	/*
	 * Load per CPU data from GDT.  LSL is faster than RDTSCP and
	 * works on all CPUs.  This is volatile so that it orders
	 * correctly wrt barrier() and to keep gcc from cleverly
	 * hoisting it out of the calling function.
	 */
	asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));

	return p;
}
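
/*
 * Illustrative use (a sketch based on how the vDSO getcpu code consumes
 * this value): the segment limit returned by LSL packs the CPU number
 * in its low 12 bits, with the node number in the bits above them.
 *
 *	unsigned int p = __getcpu();
 *	unsigned int cpu = p & VGETCPU_CPU_MASK;
 *	unsigned int node = p >> 12;
 */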

#endif /* CONFIG_X86_64 */

#endif /* _ASM_X86_VGTOD_H */