/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 */

/* Disable profiling for userspace code: */
#define DISABLE_BRANCH_PROFILING

#include <linux/kernel.h>
#include <linux/posix-timers.h>
#include <linux/time.h>
#include <linux/string.h>
#include <asm/vsyscall.h>
#include <asm/fixmap.h>
#include <asm/vgtod.h>
#include <asm/timex.h>
#include <asm/hpet.h>
#include <asm/unistd.h>
#include <asm/io.h>

#define gtod (&VVAR(vsyscall_gtod_data))
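
/*
 * gtod points at the kernel-maintained vsyscall_gtod_data structure. VVAR
 * resolves to a fixed address in a page that the kernel maps read-only into
 * every process, so the functions below can read the timekeeping state
 * without entering the kernel.
 */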

notrace static cycle_t vread_tsc(void)
{
	cycle_t ret;
	u64 last;

	/*
	 * Empirically, a fence (of type that depends on the CPU)
	 * before rdtsc is enough to ensure that rdtsc is ordered
	 * with respect to loads. The various CPU manuals are unclear
	 * as to whether rdtsc can be reordered with later loads,
	 * but no one has ever seen it happen.
	 */
	rdtsc_barrier();
	ret = (cycle_t)vget_cycles();

	last = VVAR(vsyscall_gtod_data).clock.cycle_last;

	if (likely(ret >= last))
		return ret;

	/*
	 * GCC likes to generate cmov here, but this branch is extremely
	 * predictable (it's just a function of time and the likely is
	 * very likely) and there's a data dependence, so force GCC
	 * to generate a branch instead. I don't barrier() because
	 * we don't actually need a barrier, and if this function
	 * ever gets inlined it will generate worse code.
	 */
	asm volatile ("");
	return last;
}

static notrace cycle_t vread_hpet(void)
{
	return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
}
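
/*
 * In vread_hpet(), 0xf0 is the offset of the HPET main counter register
 * within the memory-mapped HPET block, so the readl() returns the current
 * value of the free-running HPET counter.
 */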

notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
	long ret;
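	/*
	 * Fall back to the real system call: "0"/"=a" place the syscall
	 * number in and the return value out of %rax, "D" and "S" pass the
	 * two arguments in %rdi and %rsi, and the "memory" clobber keeps
	 * the compiler from caching loads across the call.
	 */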
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
	return ret;
}

notrace static inline long vgetns(void)
{
	long v;
	cycles_t cycles;
	if (gtod->clock.vclock_mode == VCLOCK_TSC)
		cycles = vread_tsc();
	else
		cycles = vread_hpet();
	v = (cycles - gtod->clock.cycle_last) & gtod->clock.mask;
	return (v * gtod->clock.mult) >> gtod->clock.shift;
}
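
/*
 * vgetns() applies the standard clocksource scaling ns = (delta * mult) >>
 * shift. As a rough illustration (made-up calibration values, not real
 * ones): for a 1 GHz counter the kernel could pick mult = 1 << 24 and
 * shift = 24, so a delta of 1000 cycles becomes (1000 << 24) >> 24 = 1000 ns.
 */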

notrace static noinline int do_realtime(struct timespec *ts)
{
	unsigned long seq, ns;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_sec;
		ts->tv_nsec = gtod->wall_time_nsec;
		ns = vgetns();
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	timespec_add_ns(ts, ns);
	return 0;
}
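
/*
 * The do/while loops in do_realtime() and the functions below are the
 * reader side of the gtod seqlock: the snapshot is retried whenever
 * read_seqretry() observes that the kernel updated vsyscall_gtod_data
 * concurrently, so callers never see a torn time value.
 */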

notrace static noinline int do_monotonic(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_sec;
		ns = gtod->wall_time_nsec + vgetns();
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
	 * are all guaranteed to be nonnegative.
	 */
	while (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}
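
/*
 * In do_monotonic(), ns includes vgetns(), which can amount to more than one
 * second if the last timekeeping update is far in the past, hence the while
 * loop; the coarse variants below only ever need a single subtraction.
 */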

notrace static noinline int do_realtime_coarse(struct timespec *ts)
{
	unsigned long seq;
	do {
		seq = read_seqbegin(&gtod->lock);
		ts->tv_sec = gtod->wall_time_coarse.tv_sec;
		ts->tv_nsec = gtod->wall_time_coarse.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));
	return 0;
}
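
/*
 * The coarse clocks never read the TSC or HPET; they return the timestamp
 * recorded at the last timer tick, trading resolution for an even cheaper
 * call.
 */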

notrace static noinline int do_monotonic_coarse(struct timespec *ts)
{
	unsigned long seq, ns, secs;
	do {
		seq = read_seqbegin(&gtod->lock);
		secs = gtod->wall_time_coarse.tv_sec;
		ns = gtod->wall_time_coarse.tv_nsec;
		secs += gtod->wall_to_monotonic.tv_sec;
		ns += gtod->wall_to_monotonic.tv_nsec;
	} while (unlikely(read_seqretry(&gtod->lock, seq)));

	/* wall_time_nsec and wall_to_monotonic.tv_nsec are
	 * guaranteed to be between 0 and NSEC_PER_SEC.
	 */
	if (ns >= NSEC_PER_SEC) {
		ns -= NSEC_PER_SEC;
		++secs;
	}
	ts->tv_sec = secs;
	ts->tv_nsec = ns;

	return 0;
}

notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
{
	switch (clock) {
	case CLOCK_REALTIME:
		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
			return do_realtime(ts);
		break;
	case CLOCK_MONOTONIC:
		if (likely(gtod->clock.vclock_mode != VCLOCK_NONE))
			return do_monotonic(ts);
		break;
	case CLOCK_REALTIME_COARSE:
		return do_realtime_coarse(ts);
	case CLOCK_MONOTONIC_COARSE:
		return do_monotonic_coarse(ts);
	}

	return vdso_fallback_gettime(clock, ts);
}
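
/*
 * Clocks with a usable vclock (TSC or HPET) are served entirely in userspace
 * above; any other clockid, or a system where the vclock has been disabled
 * (VCLOCK_NONE), falls through to the real clock_gettime() system call.
 */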
int clock_gettime(clockid_t, struct timespec *)
	__attribute__((weak, alias("__vdso_clock_gettime")));

notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
{
	long ret;
	if (likely(gtod->clock.vclock_mode != VCLOCK_NONE)) {
		if (likely(tv != NULL)) {
			BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
				     offsetof(struct timespec, tv_nsec) ||
				     sizeof(*tv) != sizeof(struct timespec));
			do_realtime((struct timespec *)tv);
			/* do_realtime() filled tv_usec with nanoseconds. */
			tv->tv_usec /= 1000;
		}
		if (unlikely(tz != NULL)) {
			/* Avoid memcpy. Some old compilers fail to inline it */
			tz->tz_minuteswest = gtod->sys_tz.tz_minuteswest;
			tz->tz_dsttime = gtod->sys_tz.tz_dsttime;
		}
		return 0;
	}
	asm("syscall" : "=a" (ret) :
	    "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
	return ret;
}

int gettimeofday(struct timeval *, struct timezone *)
	__attribute__((weak, alias("__vdso_gettimeofday")));

/*
 * This will break when the xtime seconds get inaccurate, but that is
 * unlikely
 */
notrace time_t __vdso_time(time_t *t)
{
	/* This is atomic on x86_64 so we don't need any locks. */
	time_t result = ACCESS_ONCE(VVAR(vsyscall_gtod_data).wall_time_sec);

	if (t)
		*t = result;
	return result;
}
int time(time_t *t)
	__attribute__((weak, alias("__vdso_time")));
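
/*
 * Illustrative only (not part of this file): because the symbols above are
 * exported from the vDSO with weak aliases, an ordinary program typically
 * gets the fast path just by calling the libc wrappers. A minimal sketch:
 *
 *	#include <stdio.h>
 *	#include <time.h>
 *
 *	int main(void)
 *	{
 *		struct timespec ts;
 *		if (clock_gettime(CLOCK_MONOTONIC, &ts) == 0)
 *			printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
 *		return 0;
 *	}
 *
 * On x86-64 a typical libc resolves clock_gettime() through the vDSO, so the
 * call normally completes without entering the kernel.
 */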