/*
 * Copyright 2006 Andi Kleen, SUSE Labs.
 * Subject to the GNU Public License, v.2
 *
 * Fast user context implementation of clock_gettime, gettimeofday, and time.
 *
 * The code should have no internal unresolved relocations.
 * Check with readelf after changing.
 * Also alternative() doesn't work.
 */
12 /* Disable profiling for userspace code: */
13 #define DISABLE_BRANCH_PROFILING
15 #include <linux/kernel.h>
16 #include <linux/posix-timers.h>
17 #include <linux/time.h>
18 #include <linux/string.h>
19 #include <asm/vsyscall.h>
20 #include <asm/vgtod.h>
21 #include <asm/timex.h>
23 #include <asm/unistd.h>
26 #define gtod (&VVAR(vsyscall_gtod_data))
28 notrace
static long vdso_fallback_gettime(long clock
, struct timespec
*ts
)
31 asm("syscall" : "=a" (ret
) :
32 "0" (__NR_clock_gettime
),"D" (clock
), "S" (ts
) : "memory");
36 notrace
static inline long vgetns(void)
39 cycles_t (*vread
)(void);
40 vread
= gtod
->clock
.vread
;
41 v
= (vread() - gtod
->clock
.cycle_last
) & gtod
->clock
.mask
;
42 return (v
* gtod
->clock
.mult
) >> gtod
->clock
.shift
;
45 notrace
static noinline
int do_realtime(struct timespec
*ts
)
47 unsigned long seq
, ns
;
49 seq
= read_seqbegin(>od
->lock
);
50 ts
->tv_sec
= gtod
->wall_time_sec
;
51 ts
->tv_nsec
= gtod
->wall_time_nsec
;
53 } while (unlikely(read_seqretry(>od
->lock
, seq
)));
54 timespec_add_ns(ts
, ns
);
58 notrace
static noinline
int do_monotonic(struct timespec
*ts
)
60 unsigned long seq
, ns
, secs
;
62 seq
= read_seqbegin(>od
->lock
);
63 secs
= gtod
->wall_time_sec
;
64 ns
= gtod
->wall_time_nsec
+ vgetns();
65 secs
+= gtod
->wall_to_monotonic
.tv_sec
;
66 ns
+= gtod
->wall_to_monotonic
.tv_nsec
;
67 } while (unlikely(read_seqretry(>od
->lock
, seq
)));
69 /* wall_time_nsec, vgetns(), and wall_to_monotonic.tv_nsec
70 * are all guaranteed to be nonnegative.
72 while (ns
>= NSEC_PER_SEC
) {
82 notrace
static noinline
int do_realtime_coarse(struct timespec
*ts
)
86 seq
= read_seqbegin(>od
->lock
);
87 ts
->tv_sec
= gtod
->wall_time_coarse
.tv_sec
;
88 ts
->tv_nsec
= gtod
->wall_time_coarse
.tv_nsec
;
89 } while (unlikely(read_seqretry(>od
->lock
, seq
)));
93 notrace
static noinline
int do_monotonic_coarse(struct timespec
*ts
)
95 unsigned long seq
, ns
, secs
;
97 seq
= read_seqbegin(>od
->lock
);
98 secs
= gtod
->wall_time_coarse
.tv_sec
;
99 ns
= gtod
->wall_time_coarse
.tv_nsec
;
100 secs
+= gtod
->wall_to_monotonic
.tv_sec
;
101 ns
+= gtod
->wall_to_monotonic
.tv_nsec
;
102 } while (unlikely(read_seqretry(>od
->lock
, seq
)));
104 /* wall_time_nsec and wall_to_monotonic.tv_nsec are
105 * guaranteed to be between 0 and NSEC_PER_SEC.
107 if (ns
>= NSEC_PER_SEC
) {
117 notrace
int __vdso_clock_gettime(clockid_t clock
, struct timespec
*ts
)
119 if (likely(gtod
->sysctl_enabled
))
122 if (likely(gtod
->clock
.vread
))
123 return do_realtime(ts
);
125 case CLOCK_MONOTONIC
:
126 if (likely(gtod
->clock
.vread
))
127 return do_monotonic(ts
);
129 case CLOCK_REALTIME_COARSE
:
130 return do_realtime_coarse(ts
);
131 case CLOCK_MONOTONIC_COARSE
:
132 return do_monotonic_coarse(ts
);
134 return vdso_fallback_gettime(clock
, ts
);
136 int clock_gettime(clockid_t
, struct timespec
*)
137 __attribute__((weak
, alias("__vdso_clock_gettime")));
139 notrace
int __vdso_gettimeofday(struct timeval
*tv
, struct timezone
*tz
)
142 if (likely(gtod
->sysctl_enabled
&& gtod
->clock
.vread
)) {
143 if (likely(tv
!= NULL
)) {
144 BUILD_BUG_ON(offsetof(struct timeval
, tv_usec
) !=
145 offsetof(struct timespec
, tv_nsec
) ||
146 sizeof(*tv
) != sizeof(struct timespec
));
147 do_realtime((struct timespec
*)tv
);
150 if (unlikely(tz
!= NULL
)) {
151 /* Avoid memcpy. Some old compilers fail to inline it */
152 tz
->tz_minuteswest
= gtod
->sys_tz
.tz_minuteswest
;
153 tz
->tz_dsttime
= gtod
->sys_tz
.tz_dsttime
;
157 asm("syscall" : "=a" (ret
) :
158 "0" (__NR_gettimeofday
), "D" (tv
), "S" (tz
) : "memory");
161 int gettimeofday(struct timeval
*, struct timezone
*)
162 __attribute__((weak
, alias("__vdso_gettimeofday")));
164 /* This will break when the xtime seconds get inaccurate, but that is
167 static __always_inline
long time_syscall(long *t
)
170 asm volatile("syscall"
172 : "0" (__NR_time
), "D" (t
) : "cc", "r11", "cx", "memory");
176 notrace
time_t __vdso_time(time_t *t
)
180 if (unlikely(!VVAR(vsyscall_gtod_data
).sysctl_enabled
))
181 return time_syscall(t
);
183 /* This is atomic on x86_64 so we don't need any locks. */
184 result
= ACCESS_ONCE(VVAR(vsyscall_gtod_data
).wall_time_sec
);
191 __attribute__((weak
, alias("__vdso_time")));