/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
 *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

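/*
 * An illustrative check (the exact path of the built image depends on
 * the build tree):
 *
 *	readelf -r arch/x86/entry/vdso/vdso64.so
 *
 * should report no relocation entries for the finished image.
 */
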
#include <uapi/linux/time.h>
#include <asm/vgtod.h>
#include <asm/vvar.h>
#include <asm/unistd.h>
#include <asm/msr.h>
#include <asm/pvclock.h>
#include <asm/mshyperv.h>
#include <linux/math64.h>
#include <linux/time.h>
#include <linux/kernel.h>

#define gtod (&VVAR(vsyscall_gtod_data))

extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
extern time_t __vdso_time(time_t *t);

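/*
 * Userspace normally reaches these entry points through the C library:
 * a plain clock_gettime()/gettimeofday()/time() call is dispatched by
 * glibc to the __vdso_* symbols above, so the fast path never enters
 * the kernel.  A minimal caller sketch (ordinary userspace code, shown
 * only for illustration, not part of this file):
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *
 *		clock_gettime(CLOCK_MONOTONIC, &ts);
 *		printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 *
 * Programs that want to bind to the vDSO directly can locate it via
 * getauxval(AT_SYSINFO_EHDR) and resolve "__vdso_clock_gettime" by
 * hand; see the parse_vdso.c example shipped with the kernel sources.
 */
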
#ifdef CONFIG_PARAVIRT_CLOCK
extern u8 pvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifdef CONFIG_HYPERV_TSCPAGE
extern u8 hvclock_page
	__attribute__((visibility("hidden")));
#endif

#ifndef BUILD_VDSO32

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*ts) :
	     "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
	     "memory", "rcx", "r11");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
	     "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
	     "memory", "rcx", "r11");
	return ret;
}

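/*
 * Note on the clobber lists above: the 64-bit SYSCALL instruction
 * stores the return RIP in %rcx and RFLAGS in %r11, so the kernel
 * entry destroys both registers; "memory" is needed because the
 * kernel writes *ts, *tv and *tz behind the compiler's back.
 */
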
#else

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[clock], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*ts)
		: "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
		: "memory", "edx");
	return ret;
}

notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
{
	long ret;

	asm (
		"mov %%ebx, %%edx \n"
		"mov %[tv], %%ebx \n"
		"call __kernel_vsyscall \n"
		"mov %%edx, %%ebx \n"
		: "=a" (ret), "=m" (*tv), "=m" (*tz)
		: "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
		: "memory", "edx");
	return ret;
}

#endif

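/*
 * Note on the BUILD_VDSO32 fallbacks above: __kernel_vsyscall takes
 * the system call number in %eax and the first two arguments in %ebx
 * and %ecx.  %ebx is saved and restored by hand through %edx (hence
 * the "edx" clobber) rather than listed as a clobber, since it may be
 * in use as the PIC/GOT register.
 */
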
#ifdef CONFIG_PARAVIRT_CLOCK
static notrace const struct pvclock_vsyscall_time_info *get_pvti0(void)
{
	return (const struct pvclock_vsyscall_time_info *)&pvclock_page;
}

static notrace u64 vread_pvclock(int *mode)
{
	const struct pvclock_vcpu_time_info *pvti = &get_pvti0()->pvti;
	u64 ret;
	u64 last;
	u32 version;

	/*
	 * Note: The kernel and hypervisor must guarantee that cpu ID
	 * number maps 1:1 to per-CPU pvclock time info.
	 *
	 * Because the hypervisor is entirely unaware of guest userspace
	 * preemption, it cannot guarantee that per-CPU pvclock time
	 * info is updated if the underlying CPU changes or that that
	 * version is increased whenever underlying CPU changes.
	 *
	 * On KVM, we are guaranteed that pvti updates for any vCPU are
	 * atomic as seen by *all* vCPUs.  This is an even stronger
	 * guarantee than we get with a normal seqlock.
	 *
	 * On Xen, we don't appear to have that guarantee, but Xen still
	 * supplies a valid seqlock using the version field.
	 *
	 * We only do pvclock vdso timing at all if
	 * PVCLOCK_TSC_STABLE_BIT is set, and we interpret that bit to
	 * mean that all vCPUs have matching pvti and that the TSC is
	 * synced, so we can just look at vCPU 0's pvti.
	 */

	do {
		version = pvclock_read_begin(pvti);

		if (unlikely(!(pvti->flags & PVCLOCK_TSC_STABLE_BIT))) {
			*mode = VCLOCK_NONE;
			return 0;
		}

		ret = __pvclock_read_cycles(pvti, rdtsc_ordered());
	} while (pvclock_read_retry(pvti, version));

	/* refer to vread_tsc() comment for rationale */
	last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	return last;
}
#endif

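/*
 * For reference, __pvclock_read_cycles() above amounts to scaling the
 * TSC delta by the (mul, shift) pair that the hypervisor publishes in
 * pvti and adding the result to the published system time, roughly:
 *
 *	delta = tsc - pvti->tsc_timestamp;	(scaled by tsc_shift)
 *	time  = pvti->system_time + ((delta * pvti->tsc_to_system_mul) >> 32);
 *
 * This is an approximate sketch; see pvclock_scale_delta() for the
 * exact arithmetic, including negative shifts.
 */
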
#ifdef CONFIG_HYPERV_TSCPAGE
static notrace u64 vread_hvclock(int *mode)
{
	const struct ms_hyperv_tsc_page *tsc_pg =
		(const struct ms_hyperv_tsc_page *)&hvclock_page;
	u64 current_tick = hv_read_tsc_page(tsc_pg);

	if (current_tick != U64_MAX)
		return current_tick;

	*mode = VCLOCK_NONE;
	return 0;
}
#endif

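/*
 * rdtsc_ordered(), used below and in vread_pvclock() above, is RDTSC
 * preceded by a barrier so that the TSC read cannot be speculated
 * ahead of earlier loads from the vsyscall data page.
 */
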
notrace static u64 vread_tsc(void)
{
	u64 ret = (u64)rdtsc_ordered();
	u64 last = gtod->cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

notrace static inline u64 vgetsns(int *mode)
{
	u64 v;
	u64 cycles;

	if (gtod->vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
#ifdef CONFIG_PARAVIRT_CLOCK
	else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
		cycles = vread_pvclock(mode);
#endif
#ifdef CONFIG_HYPERV_TSCPAGE
	else if (gtod->vclock_mode == VCLOCK_HVCLOCK)
		cycles = vread_hvclock(mode);
#endif
	else
		return 0;
	v = (cycles - gtod->cycle_last) & gtod->mask;
	return v * gtod->mult;
}

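/*
 * The (mult, shift) pair implements the usual clocksource fixed-point
 * conversion: nanoseconds = (cycles_delta * mult) >> shift.  vgetsns()
 * returns the still-shifted product; the callers below add it to the
 * pre-shifted *_snsec snapshot and only shift the sum down afterwards,
 * so no fractional nanoseconds are lost in the addition.
 */
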
/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
notrace static int __always_inline do_realtime(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->wall_time_sec;
		ns = gtod->wall_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

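/*
 * __iter_div_u64_rem() divides by repeated subtraction, which is cheap
 * here (and in do_monotonic() below) because only a few whole seconds
 * can ever accumulate in ns; e.g. ns == 2300000000 adds 2 to tv_sec
 * and leaves ns == 300000000.
 */
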
notrace static int __always_inline do_monotonic(struct timespec *ts)
{
	unsigned long seq;
	u64 ns;
	int mode;

	do {
		seq = gtod_read_begin(gtod);
		mode = gtod->vclock_mode;
		ts->tv_sec = gtod->monotonic_time_sec;
		ns = gtod->monotonic_time_snsec;
		ns += vgetsns(&mode);
		ns >>= gtod->shift;
	} while (unlikely(gtod_read_retry(gtod, seq)));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return mode;
}

notrace static void do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->wall_time_coarse_sec;
		ts->tv_nsec = gtod->wall_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

notrace static void do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq;

	do {
		seq = gtod_read_begin(gtod);
		ts->tv_sec = gtod->monotonic_time_coarse_sec;
		ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
	} while (unlikely(gtod_read_retry(gtod, seq)));
}

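/*
 * The coarse variants above never touch the TSC: they only copy the
 * tick-granular snapshot that the timekeeping core publishes in the
 * vvar page, trading resolution for an even cheaper read.
 */
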
notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (do_realtime(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_MONOTONIC:
		if (do_monotonic(ts) == VCLOCK_NONE)
			goto fallback;
		break;
	case CLOCK_REALTIME_COARSE:
		do_realtime_coarse(ts);
		break;
	case CLOCK_MONOTONIC_COARSE:
		do_monotonic_coarse(ts);
		break;
	default:
		goto fallback;
	}

	return 0;
fallback:
	return vdso_fallback_gettime(clock, ts);
}
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

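/*
 * gettimeofday() reuses do_realtime() by treating the timeval as a
 * timespec: tv_sec/tv_usec and tv_sec/tv_nsec share the same layout
 * here, so the nanoseconds written into the tv_usec slot only need a
 * final divide by 1000.
 */
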
notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	if (likely(tv != NULL)) {
		if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
			return vdso_fallback_gtod(tv, tz);
		tv->tv_usec /= 1000;
	}
	if (unlikely(tz != NULL)) {
		tz->tz_minuteswest = gtod->tz_minuteswest;
		tz->tz_dsttime = gtod->tz_dsttime;
	}

	return 0;
}
int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86 so we don't need any locks. */
	time_t result = ACCESS_ONCE(gtod->wall_time_sec);

	if (t)
		*t = result;
	return result;
}
time_t time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));