[linux/fpc-iii.git] / arch/x86/kernel/vread_tsc_64.c
/* This code runs in userspace. */

#define DISABLE_BRANCH_PROFILING
#include <asm/vgtod.h>

notrace cycle_t __vsyscall_fn vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads.  The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead.  I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}
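
The pattern above (fence, read the TSC, then clamp against the last published value so the clock never appears to run backwards) can be sketched in ordinary userspace C with compiler intrinsics. The sketch below assumes an x86-64 GCC/Clang toolchain; the names cycle_last and read_tsc_clamped are illustrative and not taken from the kernel source.

/*
 * Minimal userspace sketch of the same idea, assuming <x86intrin.h>
 * is available: an lfence before rdtsc orders the read against
 * earlier loads, and the result is clamped to a previously observed
 * value so it never goes backwards.
 */
#include <stdint.h>
#include <x86intrin.h>

static uint64_t cycle_last;	/* last value handed out (hypothetical) */

static uint64_t read_tsc_clamped(void)
{
	uint64_t ret, last;

	_mm_lfence();		/* order rdtsc with respect to earlier loads */
	ret = __rdtsc();
	last = cycle_last;

	if (ret >= last)
		return ret;

	/* Empty asm forces a branch rather than a cmov, as in the original. */
	__asm__ volatile ("");
	return last;
}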