// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2021, Red Hat, Inc.
 *
 * Tests for Hyper-V clocksources
 */
12 struct ms_hyperv_tsc_page
{
13 volatile u32 tsc_sequence
;
15 volatile u64 tsc_scale
;
16 volatile s64 tsc_offset
;
19 /* Simplified mul_u64_u64_shr() */
20 static inline u64
mul_u64_u64_shr64(u64 a
, u64 b
)
33 rm
.ll
= (u64
)a0
.l
.low
* b0
.l
.high
;
34 rn
.ll
= (u64
)a0
.l
.high
* b0
.l
.low
;
35 rh
.ll
= (u64
)a0
.l
.high
* b0
.l
.high
;
37 rh
.l
.low
= c
= rm
.l
.high
+ rn
.l
.high
+ rh
.l
.low
;
38 rh
.l
.high
= (c
>> 32) + rh
.l
.high
;
/*
 * Busy-wait delay: spin long enough for the reference counters to
 * advance measurably between two samples.  The volatile asm prevents
 * the compiler from eliding the loop.
 */
static inline void nop_loop(void)
{
	int i;

	for (i = 0; i < 100000000; i++)
		__asm__ __volatile__("nop");
}
51 static inline void check_tsc_msr_rdtsc(void)
53 u64 tsc_freq
, r1
, r2
, t1
, t2
;
56 tsc_freq
= rdmsr(HV_X64_MSR_TSC_FREQUENCY
);
57 GUEST_ASSERT(tsc_freq
> 0);
59 /* For increased accuracy, take mean rdtsc() before and afrer rdmsr() */
61 t1
= rdmsr(HV_X64_MSR_TIME_REF_COUNT
);
62 r1
= (r1
+ rdtsc()) / 2;
65 t2
= rdmsr(HV_X64_MSR_TIME_REF_COUNT
);
66 r2
= (r2
+ rdtsc()) / 2;
68 GUEST_ASSERT(r2
> r1
&& t2
> t1
);
70 /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
71 delta_ns
= ((t2
- t1
) * 100) - ((r2
- r1
) * 1000000000 / tsc_freq
);
76 GUEST_ASSERT(delta_ns
* 100 < (t2
- t1
) * 100);
79 static inline u64
get_tscpage_ts(struct ms_hyperv_tsc_page
*tsc_page
)
81 return mul_u64_u64_shr64(rdtsc(), tsc_page
->tsc_scale
) + tsc_page
->tsc_offset
;
84 static inline void check_tsc_msr_tsc_page(struct ms_hyperv_tsc_page
*tsc_page
)
88 /* Compare TSC page clocksource with HV_X64_MSR_TIME_REF_COUNT */
89 t1
= get_tscpage_ts(tsc_page
);
90 r1
= rdmsr(HV_X64_MSR_TIME_REF_COUNT
);
93 GUEST_ASSERT(r1
>= t1
&& r1
- t1
< 100000);
96 t2
= get_tscpage_ts(tsc_page
);
97 r2
= rdmsr(HV_X64_MSR_TIME_REF_COUNT
);
98 GUEST_ASSERT(r2
>= t1
&& r2
- t2
< 100000);
101 static void guest_main(struct ms_hyperv_tsc_page
*tsc_page
, vm_paddr_t tsc_page_gpa
)
103 u64 tsc_scale
, tsc_offset
;
105 /* Set Guest OS id to enable Hyper-V emulation */
107 wrmsr(HV_X64_MSR_GUEST_OS_ID
, HYPERV_LINUX_OS_ID
);
110 check_tsc_msr_rdtsc();
114 /* Set up TSC page is disabled state, check that it's clean */
115 wrmsr(HV_X64_MSR_REFERENCE_TSC
, tsc_page_gpa
);
116 GUEST_ASSERT(tsc_page
->tsc_sequence
== 0);
117 GUEST_ASSERT(tsc_page
->tsc_scale
== 0);
118 GUEST_ASSERT(tsc_page
->tsc_offset
== 0);
122 /* Set up TSC page is enabled state */
123 wrmsr(HV_X64_MSR_REFERENCE_TSC
, tsc_page_gpa
| 0x1);
124 GUEST_ASSERT(tsc_page
->tsc_sequence
!= 0);
128 check_tsc_msr_tsc_page(tsc_page
);
132 tsc_offset
= tsc_page
->tsc_offset
;
133 /* Call KVM_SET_CLOCK from userspace, check that TSC page was updated */
136 /* Sanity check TSC page timestamp, it should be close to 0 */
137 GUEST_ASSERT(get_tscpage_ts(tsc_page
) < 100000);
139 GUEST_ASSERT(tsc_page
->tsc_offset
!= tsc_offset
);
144 * Enable Re-enlightenment and check that TSC page stays constant across
147 wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL
, 0x1 << 16 | 0xff);
148 wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL
, 0x1);
149 tsc_offset
= tsc_page
->tsc_offset
;
150 tsc_scale
= tsc_page
->tsc_scale
;
152 GUEST_ASSERT(tsc_page
->tsc_offset
== tsc_offset
);
153 GUEST_ASSERT(tsc_page
->tsc_scale
== tsc_scale
);
157 check_tsc_msr_tsc_page(tsc_page
);
160 * Disable re-enlightenment and TSC page, check that KVM doesn't update
163 wrmsr(HV_X64_MSR_REENLIGHTENMENT_CONTROL
, 0);
164 wrmsr(HV_X64_MSR_TSC_EMULATION_CONTROL
, 0);
165 wrmsr(HV_X64_MSR_REFERENCE_TSC
, 0);
166 memset(tsc_page
, 0, sizeof(*tsc_page
));
169 GUEST_ASSERT(tsc_page
->tsc_sequence
== 0);
170 GUEST_ASSERT(tsc_page
->tsc_offset
== 0);
171 GUEST_ASSERT(tsc_page
->tsc_scale
== 0);
176 static void host_check_tsc_msr_rdtsc(struct kvm_vcpu
*vcpu
)
178 u64 tsc_freq
, r1
, r2
, t1
, t2
;
181 tsc_freq
= vcpu_get_msr(vcpu
, HV_X64_MSR_TSC_FREQUENCY
);
182 TEST_ASSERT(tsc_freq
> 0, "TSC frequency must be nonzero");
184 /* For increased accuracy, take mean rdtsc() before and afrer ioctl */
186 t1
= vcpu_get_msr(vcpu
, HV_X64_MSR_TIME_REF_COUNT
);
187 r1
= (r1
+ rdtsc()) / 2;
190 t2
= vcpu_get_msr(vcpu
, HV_X64_MSR_TIME_REF_COUNT
);
191 r2
= (r2
+ rdtsc()) / 2;
193 TEST_ASSERT(t2
> t1
, "Time reference MSR is not monotonic (%ld <= %ld)", t1
, t2
);
195 /* HV_X64_MSR_TIME_REF_COUNT is in 100ns */
196 delta_ns
= ((t2
- t1
) * 100) - ((r2
- r1
) * 1000000000 / tsc_freq
);
198 delta_ns
= -delta_ns
;
201 TEST_ASSERT(delta_ns
* 100 < (t2
- t1
) * 100,
202 "Elapsed time does not match (MSR=%ld, TSC=%ld)",
203 (t2
- t1
) * 100, (r2
- r1
) * 1000000000 / tsc_freq
);
208 struct kvm_vcpu
*vcpu
;
211 vm_vaddr_t tsc_page_gva
;
214 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TIME
));
215 TEST_REQUIRE(sys_clocksource_is_based_on_tsc());
217 vm
= vm_create_with_one_vcpu(&vcpu
, guest_main
);
219 vcpu_set_hv_cpuid(vcpu
);
221 tsc_page_gva
= vm_vaddr_alloc_page(vm
);
222 memset(addr_gva2hva(vm
, tsc_page_gva
), 0x0, getpagesize());
223 TEST_ASSERT((addr_gva2gpa(vm
, tsc_page_gva
) & (getpagesize() - 1)) == 0,
224 "TSC page has to be page aligned");
225 vcpu_args_set(vcpu
, 2, tsc_page_gva
, addr_gva2gpa(vm
, tsc_page_gva
));
227 host_check_tsc_msr_rdtsc(vcpu
);
229 for (stage
= 1;; stage
++) {
231 TEST_ASSERT_KVM_EXIT_REASON(vcpu
, KVM_EXIT_IO
);
233 switch (get_ucall(vcpu
, &uc
)) {
235 REPORT_GUEST_ASSERT(uc
);
240 /* Keep in sync with guest_main() */
241 TEST_ASSERT(stage
== 11, "Testing ended prematurely, stage %d",
245 TEST_FAIL("Unknown ucall %lu", uc
.cmd
);
248 TEST_ASSERT(!strcmp((const char *)uc
.args
[0], "hello") &&
250 "Stage %d: Unexpected register values vmexit, got %lx",
251 stage
, (ulong
)uc
.args
[1]);
253 /* Reset kvmclock triggering TSC page update */
254 if (stage
== 7 || stage
== 8 || stage
== 10) {
255 struct kvm_clock_data clock
= {0};
257 vm_ioctl(vm
, KVM_SET_CLOCK
, &clock
);