/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Userland implementation of clock_gettime() for 64 bits processes in a
 * s390 kernel for use in the vDSO
 *
 *  Copyright IBM Corp. 2008
 *  Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
 */
#include <asm/vdso.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/dwarf.h>
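
/*
 * int __kernel_clock_gettime(clockid_t clock, struct timespec *tp)
 *
 * %r2 holds the clock id, %r3 the pointer to the result timespec, and
 * the return value is passed back in %r2. All readings are based on the
 * timekeeper fields the kernel exports in _vdso_data; clock ids that are
 * not handled inline branch to the system call fallback at label 12.
 */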
	.text
	.align 4
	.globl __kernel_clock_gettime
	.type  __kernel_clock_gettime,@function
__kernel_clock_gettime:
	CFI_STARTPROC
	aghi	%r15,-16
	CFI_DEF_CFA_OFFSET 176
	CFI_VAL_OFFSET 15, -160
	larl	%r5,_vdso_data
	cghi	%r2,__CLOCK_REALTIME_COARSE
	je	4f
	cghi	%r2,__CLOCK_REALTIME
	je	5f
	cghi	%r2,-3		/* Per-thread CPUCLOCK with PID=0, VIRT=1 */
	je	9f
	cghi	%r2,__CLOCK_MONOTONIC_COARSE
	je	3f
	cghi	%r2,__CLOCK_MONOTONIC
	jne	12f
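
	/*
	 * All of the readers below use the same lockless protocol:
	 * spin while the update counter is odd (the kernel is updating
	 * _vdso_data), read the values, then re-check that the counter
	 * is unchanged and retry if it is not.
	 */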
	/* CLOCK_MONOTONIC */
0:	lg	%r4,__VDSO_UPD_COUNT(%r5)	/* load update counter */
	tmll	%r4,0x0001			/* pending update ? loop */
	jnz	0b
	stcke	0(%r15)				/* Store TOD clock */
	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
	lg	%r0,__VDSO_WTOM_SEC(%r5)
	lg	%r1,1(%r15)			/* 64 bit TOD clock value */
	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
	alg	%r1,__VDSO_WTOM_NSEC(%r5)
	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
	jne	0b
	larl	%r5,13f
1:	clg	%r1,0(%r5)			/* while tv_nsec >= 10^9 */
	jl	2f
	slg	%r1,0(%r5)			/*   tv_nsec -= 10^9 */
	aghi	%r0,1				/*   tv_sec++ */
	j	1b
2:	stg	%r0,0(%r3)			/* store tp->tv_sec */
	stg	%r1,8(%r3)			/* store tp->tv_nsec */
	lghi	%r2,0
	aghi	%r15,16
	CFI_DEF_CFA_OFFSET 160
	CFI_RESTORE 15
	br	%r14
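
	/*
	 * The two coarse clocks below return the timestamps that the
	 * kernel precomputed at the last tick, so no TOD clock read or
	 * multiplication is needed; they reuse the store/return code
	 * at labels 2 and 7.
	 */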

	/* CLOCK_MONOTONIC_COARSE */
	CFI_DEF_CFA_OFFSET 176
	CFI_VAL_OFFSET 15, -160
3:	lg	%r4,__VDSO_UPD_COUNT(%r5)	/* load update counter */
	tmll	%r4,0x0001			/* pending update ? loop */
	jnz	3b
	lg	%r0,__VDSO_WTOM_CRS_SEC(%r5)
	lg	%r1,__VDSO_WTOM_CRS_NSEC(%r5)
	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
	jne	3b
	j	2b

	/* CLOCK_REALTIME_COARSE */
4:	lg	%r4,__VDSO_UPD_COUNT(%r5)	/* load update counter */
	tmll	%r4,0x0001			/* pending update ? loop */
	jnz	4b
	lg	%r0,__VDSO_XTIME_CRS_SEC(%r5)
	lg	%r1,__VDSO_XTIME_CRS_NSEC(%r5)
	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
	jne	4b
	j	7f

	/* CLOCK_REALTIME */
5:	lg	%r4,__VDSO_UPD_COUNT(%r5)	/* load update counter */
	tmll	%r4,0x0001			/* pending update ? loop */
	jnz	5b
	stcke	0(%r15)				/* Store TOD clock */
	lg	%r1,1(%r15)			/* 64 bit TOD clock value */
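	/*
	 * Apply the TOD clock steering offset: until the steering end
	 * time is reached a fraction of the remaining interval is added
	 * to (or subtracted from, depending on __VDSO_TS_DIR) the raw
	 * TOD value, so the correction is phased in gradually.
	 */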
	lg	%r0,__VDSO_TS_END(%r5)		/* TOD steering end time */
	slgr	%r0,%r1				/* ts_steering_end - now */
	ltgr	%r0,%r0				/* past end of steering ? */
	jm	17f
	srlg	%r0,%r0,15			/* 1 per 2^16 */
	tm	__VDSO_TS_DIR+3(%r5),0x01	/* steering direction? */
	jz	18f
	lcgr	%r0,%r0				/* negative TOD offset */
18:	algr	%r1,%r0				/* add steering offset */
17:	lgf	%r2,__VDSO_TK_SHIFT(%r5)	/* Timekeeper shift */
	sg	%r1,__VDSO_XTIME_STAMP(%r5)	/* TOD - cycle_last */
	msgf	%r1,__VDSO_TK_MULT(%r5)		/*  * tk->mult */
	alg	%r1,__VDSO_XTIME_NSEC(%r5)	/*  + tk->xtime_nsec */
	srlg	%r1,%r1,0(%r2)			/*  >> tk->shift */
	lg	%r0,__VDSO_XTIME_SEC(%r5)	/* tk->xtime_sec */
	clg	%r4,__VDSO_UPD_COUNT(%r5)	/* check update counter */
	jne	5b
	larl	%r5,13f
6:	clg	%r1,0(%r5)			/* while tv_nsec >= 10^9 */
	jl	7f
	slg	%r1,0(%r5)			/*   tv_nsec -= 10^9 */
	aghi	%r0,1				/*   tv_sec++ */
	j	6b
7:	stg	%r0,0(%r3)			/* store tp->tv_sec */
	stg	%r1,8(%r3)			/* store tp->tv_nsec */
	lghi	%r2,0
	aghi	%r15,16
	CFI_DEF_CFA_OFFSET 160
	CFI_RESTORE 15
	br	%r14

	/* CPUCLOCK_VIRT for this thread */
	CFI_DEF_CFA_OFFSET 176
	CFI_VAL_OFFSET 15, -160
9:	lghi	%r4,0
	icm	%r0,15,__VDSO_ECTG_OK(%r5)
	jz	12f
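	/*
	 * The .insn below encodes ECTG (extract CPU time). After the
	 * algr that follows, %r1 holds the CPU time consumed by this
	 * thread, expressed in TOD clock units. If the kernel has not
	 * marked ECTG as usable (__VDSO_ECTG_OK == 0) we already took
	 * the system call fallback above.
	 */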
	sacf	256				/* Magic ectg instruction */
	.insn	ssf,0xc80100000000,__VDSO_ECTG_BASE(4),__VDSO_ECTG_USER(4),4
	sacf	0
	algr	%r1,%r0				/* r1 = cputime as TOD value */
	mghi	%r1,1000			/* convert to nanoseconds */
	srlg	%r1,%r1,12			/* r1 = cputime in nanosec */
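	/*
	 * Split the nanosecond value into tv_sec and tv_nsec. The
	 * division by 10^9 is done without a divide instruction:
	 * multiply by the constant at label 14 (2^84 / 10^9, rounded)
	 * and shift the 128 bit product right by 9 + 64 + 11 = 84 bits.
	 */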
	lgr	%r4,%r1				/* save cputime in nanosec */
	larl	%r5,13f
	srlg	%r1,%r1,9			/* divide by 1000000000 */
	mlg	%r0,8(%r5)
	srlg	%r0,%r0,11			/* r0 = tv_sec */
	stg	%r0,0(%r3)			/* store tp->tv_sec */
	msg	%r0,0(%r5)			/* calculate tv_nsec */
	slgr	%r4,%r0				/* r4 = tv_nsec */
	stg	%r4,8(%r3)			/* store tp->tv_nsec */
	lghi	%r2,0
	aghi	%r15,16
	CFI_DEF_CFA_OFFSET 160
	CFI_RESTORE 15
	br	%r14

	/* Fallback to system call */
	CFI_DEF_CFA_OFFSET 176
	CFI_VAL_OFFSET 15, -160
12:	lghi	%r1,__NR_clock_gettime
	svc	0
	aghi	%r15,16
	CFI_DEF_CFA_OFFSET 160
	CFI_RESTORE 15
	br	%r14
	CFI_ENDPROC
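
	/*
	 * Constants: the number of nanoseconds per second (10^9), and
	 * the reciprocal 2^84 / 10^9 (rounded) used to divide by 10^9
	 * with a multiply and shift in the CPU clock path above.
	 */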
13:	.quad	1000000000
14:	.quad	19342813113834067
	.size	__kernel_clock_gettime,.-__kernel_clock_gettime