// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/compiler.h>
#include <linux/hrtimer.h>
#include <linux/time.h>
#include <asm/io.h>
#include <asm/barrier.h>
#include <asm/unistd.h>
#include <asm/vdso_datapage.h>
#include <asm/vdso_timer_info.h>
#include <asm/asm-offsets.h>

extern struct vdso_data *__get_datapage(void);
extern struct vdso_data *__get_timerpage(void);

static notrace unsigned int __vdso_read_begin(const struct vdso_data *vdata)
{
	u32 seq;

repeat:
	seq = READ_ONCE(vdata->seq_count);
	if (seq & 1) {
		cpu_relax();
		goto repeat;
	}
	return seq;
}

static notrace unsigned int vdso_read_begin(const struct vdso_data *vdata)
{
	unsigned int seq;

	seq = __vdso_read_begin(vdata);

	smp_rmb();	/* Pairs with smp_wmb in vdso_write_end */
	return seq;
}

static notrace int vdso_read_retry(const struct vdso_data *vdata, u32 start)
{
	smp_rmb();	/* Pairs with smp_wmb in vdso_write_begin */
	return vdata->seq_count != start;
}

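/*
 * The helpers above are the read side of the seqcount protocol used for the
 * vDSO data page: sample seq_count before reading the time fields, then
 * check it again afterwards and retry if it changed (an odd count means an
 * update is in progress).  The smp_rmb() calls pair with the kernel's
 * smp_wmb() in vdso_write_begin()/vdso_write_end(), as noted above.
 */
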
static notrace long clock_gettime_fallback(clockid_t _clkid,
					    struct __kernel_old_timespec *_ts)
{
	register struct __kernel_old_timespec *ts asm("$r1") = _ts;
	register clockid_t clkid asm("$r0") = _clkid;
	register long ret asm("$r0");

	asm volatile ("movi	$r15, %3\n"
		      "syscall	0x0\n"
		      :"=r" (ret)
		      :"r"(clkid), "r"(ts), "i"(__NR_clock_gettime)
		      :"$r15", "memory");

	return ret;
}

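/*
 * Fallback path: when the vDSO cannot service the request itself, it issues
 * the real system call.  On nds32 the syscall number is loaded into $r15 by
 * the "movi" above, the arguments stay in $r0/$r1, and the result comes back
 * in $r0, which is why ret is bound to the same register as the first
 * argument.
 */
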
static notrace int do_realtime_coarse(struct __kernel_old_timespec *ts,
				      struct vdso_data *vdata)
{
	u32 seq;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_coarse_sec;
		ts->tv_nsec = vdata->xtime_coarse_nsec;

	} while (vdso_read_retry(vdata, seq));
	return 0;
}

static notrace int do_monotonic_coarse(struct __kernel_old_timespec *ts,
				       struct vdso_data *vdata)
{
	u32 seq;
	u64 ns;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_coarse_sec + vdata->wtm_clock_sec;
		ns = vdata->xtime_coarse_nsec + vdata->wtm_clock_nsec;

	} while (vdso_read_retry(vdata, seq));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

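/*
 * Adding the wall-to-monotonic offset can push the nanosecond sum past
 * NSEC_PER_SEC, so __iter_div_u64_rem() folds whole seconds into tv_sec and
 * leaves the remainder (always < NSEC_PER_SEC) in ns for tv_nsec.
 */
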
static notrace inline u64 vgetsns(struct vdso_data *vdso)
{
	u32 cycle_now;
	u32 cycle_delta;
	u32 *timer_cycle_base;

	timer_cycle_base =
	    (u32 *) ((char *)__get_timerpage() + vdso->cycle_count_offset);
	cycle_now = readl_relaxed(timer_cycle_base);
	if (vdso->cycle_count_down)
		cycle_now = ~(*timer_cycle_base);
	cycle_delta = cycle_now - (u32) vdso->cs_cycle_last;
	return ((u64) cycle_delta & vdso->cs_mask) * vdso->cs_mult;
}

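/*
 * vgetsns() mirrors the kernel's clocksource arithmetic: the cycle delta
 * since cs_cycle_last is masked to the counter width and multiplied by
 * cs_mult; callers then shift the product right by cs_shift to get
 * nanoseconds.  Timers that count down are complemented first so the delta
 * is computed on an up-counting value.
 */
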
static notrace int do_realtime(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
	u32 count;
	u64 ns;

	do {
		count = vdso_read_begin(vdata);
		ts->tv_sec = vdata->xtime_clock_sec;
		ns = vdata->xtime_clock_nsec;
		ns += vgetsns(vdata);
		ns >>= vdata->cs_shift;
	} while (vdso_read_retry(vdata, count));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

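/*
 * xtime_clock_nsec is stored in the same "shifted" fixed-point form that
 * vgetsns() produces, which is why a single ns >>= cs_shift after the
 * addition converts the whole sum to nanoseconds.
 */
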
static notrace int do_monotonic(struct __kernel_old_timespec *ts, struct vdso_data *vdata)
{
	u64 ns;
	u32 seq;

	do {
		seq = vdso_read_begin(vdata);

		ts->tv_sec = vdata->xtime_clock_sec;
		ns = vdata->xtime_clock_nsec;
		ns += vgetsns(vdata);
		ns >>= vdata->cs_shift;

		ts->tv_sec += vdata->wtm_clock_sec;
		ns += vdata->wtm_clock_nsec;

	} while (vdso_read_retry(vdata, seq));

	ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
	ts->tv_nsec = ns;

	return 0;
}

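/*
 * The __vdso_* entry points below are what userspace actually calls: a C
 * library typically locates the vDSO image via getauxval(AT_SYSINFO_EHDR),
 * resolves symbols such as "__vdso_clock_gettime", and calls them directly,
 * so the fast path never enters the kernel.  (This is only a sketch of the
 * userspace side; the lookup details belong to the libc, not to this file.)
 */
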
notrace int __vdso_clock_gettime(clockid_t clkid, struct __kernel_old_timespec *ts)
{
	struct vdso_data *vdata;
	int ret = -1;

	vdata = __get_datapage();
	if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
		return clock_gettime_fallback(clkid, ts);

	switch (clkid) {
	case CLOCK_REALTIME_COARSE:
		ret = do_realtime_coarse(ts, vdata);
		break;
	case CLOCK_MONOTONIC_COARSE:
		ret = do_monotonic_coarse(ts, vdata);
		break;
	case CLOCK_REALTIME:
		ret = do_realtime(ts, vdata);
		break;
	case CLOCK_MONOTONIC:
		ret = do_monotonic(ts, vdata);
		break;
	default:
		break;
	}

	if (ret)
		ret = clock_gettime_fallback(clkid, ts);

	return ret;
}

static notrace int clock_getres_fallback(clockid_t _clk_id,
					 struct __kernel_old_timespec *_res)
{
	register clockid_t clk_id asm("$r0") = _clk_id;
	register struct __kernel_old_timespec *res asm("$r1") = _res;
	register int ret asm("$r0");

	asm volatile ("movi	$r15, %3\n"
		      "syscall	0x0\n"
		      :"=r" (ret)
		      :"r"(clk_id), "r"(res), "i"(__NR_clock_getres)
		      :"$r15", "memory");

	return ret;
}

notrace int __vdso_clock_getres(clockid_t clk_id, struct __kernel_old_timespec *res)
{
	struct vdso_data *vdata = __get_datapage();

	if (res == NULL)
		return 0;
	switch (clk_id) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
		res->tv_sec = 0;
		res->tv_nsec = vdata->hrtimer_res;
		break;
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
		res->tv_sec = 0;
		res->tv_nsec = CLOCK_COARSE_RES;
		break;
	default:
		return clock_getres_fallback(clk_id, res);
	}

	return 0;
}

static notrace inline int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
						struct timezone *_tz)
{
	register struct __kernel_old_timeval *tv asm("$r0") = _tv;
	register struct timezone *tz asm("$r1") = _tz;
	register int ret asm("$r0");

	asm volatile ("movi	$r15, %3\n"
		      "syscall	0x0\n"
		      :"=r" (ret)
		      :"r"(tv), "r"(tz), "i"(__NR_gettimeofday)
		      :"$r15", "memory");

	return ret;
}

notrace int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
{
	struct __kernel_old_timespec ts;
	struct vdso_data *vdata;
	int ret;

	vdata = __get_datapage();

	if (vdata->cycle_count_offset == EMPTY_REG_OFFSET)
		return gettimeofday_fallback(tv, tz);

	ret = do_realtime(&ts, vdata);

	if (tv) {
		tv->tv_sec = ts.tv_sec;
		tv->tv_usec = ts.tv_nsec / 1000;
	}
	if (tz) {
		tz->tz_minuteswest = vdata->tz_minuteswest;
		tz->tz_dsttime = vdata->tz_dsttime;
	}

	return ret;
}