1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2021, Red Hat, Inc.
5 * Tests for Hyper-V features enablement
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
11 #include "test_util.h"
13 #include "processor.h"
/*
 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
 * but to activate the feature it is sufficient to set it to a non-zero
 * value. Use BIT(0) for that.
 */
#define HV_PV_SPINLOCKS_TEST \
	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
37 static bool is_write_only_msr(uint32_t msr
)
39 return msr
== HV_X64_MSR_EOI
;
/*
 * Guest-side MSR test worker: reads the per-stage parameters from *msr,
 * performs the WRMSR and/or RDMSR via the *_safe() helpers and asserts
 * that the observed exception vector matches msr->fault_expected.
 *
 * NOTE(review): this block appears garbled by extraction — braces, the
 * local declarations of 'vector'/'msr_val' and some statements are not
 * visible here. TODO: confirm against the original file; do not compile
 * this text as-is.
 */
42 static void guest_msr(struct msr_data
*msr
)
/* A zero msr->idx means the host never initialized this stage. */
47 GUEST_ASSERT(msr
->idx
);
/* Attempt the write; wrmsr_safe() returns the exception vector (0 = ok). */
50 vector
= wrmsr_safe(msr
->idx
, msr
->write_val
);
/* Read back unless the write faulted, or the MSR is write-only. */
52 if (!vector
&& (!msr
->write
|| !is_write_only_msr(msr
->idx
)))
53 vector
= rdmsr_safe(msr
->idx
, &msr_val
);
/* Expected-fault stages must see #GP; others must see no exception. */
55 if (msr
->fault_expected
)
56 __GUEST_ASSERT(vector
== GP_VECTOR
,
57 "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
58 msr
->write
? "WR" : "RD", msr
->idx
, vector
);
60 __GUEST_ASSERT(!vector
,
61 "Expected success on %sMSR(0x%x), got vector '0x%x'",
62 msr
->write
? "WR" : "RD", msr
->idx
, vector
);
/* No read-back value to check if we faulted or the MSR is write-only. */
64 if (vector
|| is_write_only_msr(msr
->idx
))
68 __GUEST_ASSERT(!vector
,
69 "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
70 msr
->idx
, msr
->write_val
, msr_val
);
72 /* Invariant TSC bit appears when TSC invariant control MSR is written to */
73 if (msr
->idx
== HV_X64_MSR_TSC_INVARIANT_CONTROL
) {
74 if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT
))
75 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC
));
/* With HV_ACCESS_TSC_INVARIANT, INVTSC visibility must track the
 * HV_INVARIANT_TSC_EXPOSED bit read back from the MSR. */
77 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC
) ==
78 !!(msr_val
& HV_INVARIANT_TSC_EXPOSED
));
/*
 * Guest-side hypercall test worker: issues the hypercall described by
 * *hcall and asserts either the expected #UD (hcall->ud_expected) or the
 * expected Hyper-V status code (hcall->expect).
 *
 * NOTE(review): block appears garbled by extraction — braces, the
 * 'vector' declaration and the assignment of 'input' are not visible
 * here. TODO: confirm against the original file.
 */
85 static void guest_hcall(vm_vaddr_t pgs_gpa
, struct hcall_data
*hcall
)
87 u64 res
, input
, output
;
/* control == 0 would mean the host never set up this stage. */
90 GUEST_ASSERT_NE(hcall
->control
, 0);
/* Hypercall interface setup: guest OS ID first, then hypercall page GPA. */
92 wrmsr(HV_X64_MSR_GUEST_OS_ID
, HYPERV_LINUX_OS_ID
);
93 wrmsr(HV_X64_MSR_HYPERCALL
, pgs_gpa
);
/* Non-fast ("slow") hypercalls pass input/output via memory pages. */
95 if (!(hcall
->control
& HV_HYPERCALL_FAST_BIT
)) {
97 output
= pgs_gpa
+ 4096;
102 vector
= __hyperv_hypercall(hcall
->control
, input
, output
, &res
);
/* Either the hypercall itself must #UD, or the returned status must
 * match the host-programmed expectation. */
103 if (hcall
->ud_expected
) {
104 __GUEST_ASSERT(vector
== UD_VECTOR
,
105 "Expected #UD for control '%lu', got vector '0x%x'",
106 hcall
->control
, vector
);
108 __GUEST_ASSERT(!vector
,
109 "Expected no exception for control '%lu', got vector '0x%x'",
110 hcall
->control
, vector
);
111 GUEST_ASSERT_EQ(res
, hcall
->expect
);
117 static void vcpu_reset_hv_cpuid(struct kvm_vcpu
*vcpu
)
120 * Enable all supported Hyper-V features, then clear the leafs holding
121 * the features that will be tested one by one.
123 vcpu_set_hv_cpuid(vcpu
);
125 vcpu_clear_cpuid_entry(vcpu
, HYPERV_CPUID_FEATURES
);
126 vcpu_clear_cpuid_entry(vcpu
, HYPERV_CPUID_ENLIGHTMENT_INFO
);
127 vcpu_clear_cpuid_entry(vcpu
, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES
);
/*
 * Host-side driver for the synthetic-MSR tests: creates a VM running
 * guest_msr(), then walks through stages, each programming *msr (shared
 * with the guest) with an MSR index and whether a fault is expected,
 * flipping the corresponding Hyper-V CPUID feature bits between stages.
 *
 * NOTE(review): this block appears garbled by extraction — the stage
 * loop, 'case' labels, 'msr->write' assignments, vcpu_run() calls and
 * several declarations ('vm', 'msr_gva', 'stage', 'uc') are not visible
 * here. TODO: confirm against the original file; do not compile as-is.
 */
130 static void guest_test_msrs_access(void)
132 struct kvm_cpuid2
*prev_cpuid
= NULL
;
133 struct kvm_vcpu
*vcpu
;
138 struct msr_data
*msr
;
/* Invariant-TSC stages only run when the host CPU exposes INVTSC. */
139 bool has_invtsc
= kvm_cpu_has(X86_FEATURE_INVTSC
);
/* Create the VM and a zeroed guest page holding the shared msr_data. */
142 vm
= vm_create_with_one_vcpu(&vcpu
, guest_msr
);
144 msr_gva
= vm_vaddr_alloc_page(vm
);
145 memset(addr_gva2hva(vm
, msr_gva
), 0x0, getpagesize());
146 msr
= addr_gva2hva(vm
, msr_gva
);
148 vcpu_args_set(vcpu
, 1, msr_gva
);
/* Make KVM enforce the guest-visible Hyper-V CPUID during MSR access. */
149 vcpu_enable_cap(vcpu
, KVM_CAP_HYPERV_ENFORCE_CPUID
, 1);
152 vcpu_reset_hv_cpuid(vcpu
);
/* Snapshot CPUID so later stages can detect/restore changes. */
154 prev_cpuid
= allocate_kvm_cpuid2(vcpu
->cpuid
->nent
);
156 vcpu_init_cpuid(vcpu
, prev_cpuid
);
159 /* TODO: Make this entire test easier to maintain. */
161 vcpu_enable_cap(vcpu
, KVM_CAP_HYPERV_SYNIC2
, 0);
166 * Only available when Hyper-V identification is set
168 msr
->idx
= HV_X64_MSR_GUEST_OS_ID
;
170 msr
->fault_expected
= true;
173 msr
->idx
= HV_X64_MSR_HYPERCALL
;
175 msr
->fault_expected
= true;
/* With HV_MSR_HYPERCALL_AVAILABLE set, both MSRs must stop faulting. */
178 vcpu_set_cpuid_feature(vcpu
, HV_MSR_HYPERCALL_AVAILABLE
);
180 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
181 * HV_X64_MSR_HYPERCALL available.
183 msr
->idx
= HV_X64_MSR_GUEST_OS_ID
;
185 msr
->write_val
= HYPERV_LINUX_OS_ID
;
186 msr
->fault_expected
= false;
189 msr
->idx
= HV_X64_MSR_GUEST_OS_ID
;
191 msr
->fault_expected
= false;
194 msr
->idx
= HV_X64_MSR_HYPERCALL
;
196 msr
->fault_expected
= false;
/* VP_RUNTIME: fault without the bit, success with it. */
200 msr
->idx
= HV_X64_MSR_VP_RUNTIME
;
202 msr
->fault_expected
= true;
205 vcpu_set_cpuid_feature(vcpu
, HV_MSR_VP_RUNTIME_AVAILABLE
);
206 msr
->idx
= HV_X64_MSR_VP_RUNTIME
;
208 msr
->fault_expected
= false;
/* Presumably a read-only check: the next access must fault again. */
212 msr
->idx
= HV_X64_MSR_VP_RUNTIME
;
215 msr
->fault_expected
= true;
/* TIME_REF_COUNT: same gate-then-allow pattern. */
219 msr
->idx
= HV_X64_MSR_TIME_REF_COUNT
;
221 msr
->fault_expected
= true;
224 vcpu_set_cpuid_feature(vcpu
, HV_MSR_TIME_REF_COUNT_AVAILABLE
);
225 msr
->idx
= HV_X64_MSR_TIME_REF_COUNT
;
227 msr
->fault_expected
= false;
231 msr
->idx
= HV_X64_MSR_TIME_REF_COUNT
;
234 msr
->fault_expected
= true;
/* VP_INDEX: same gate-then-allow pattern. */
238 msr
->idx
= HV_X64_MSR_VP_INDEX
;
240 msr
->fault_expected
= true;
243 vcpu_set_cpuid_feature(vcpu
, HV_MSR_VP_INDEX_AVAILABLE
);
244 msr
->idx
= HV_X64_MSR_VP_INDEX
;
246 msr
->fault_expected
= false;
250 msr
->idx
= HV_X64_MSR_VP_INDEX
;
253 msr
->fault_expected
= true;
/* RESET MSR stages. */
257 msr
->idx
= HV_X64_MSR_RESET
;
259 msr
->fault_expected
= true;
262 vcpu_set_cpuid_feature(vcpu
, HV_MSR_RESET_AVAILABLE
);
263 msr
->idx
= HV_X64_MSR_RESET
;
265 msr
->fault_expected
= false;
268 msr
->idx
= HV_X64_MSR_RESET
;
271 * TODO: the test only writes '0' to HV_X64_MSR_RESET
272 * at the moment, writing some other value there will
273 * trigger real vCPU reset and the code is not prepared
277 msr
->fault_expected
= false;
/* REFERENCE_TSC stages. */
281 msr
->idx
= HV_X64_MSR_REFERENCE_TSC
;
283 msr
->fault_expected
= true;
286 vcpu_set_cpuid_feature(vcpu
, HV_MSR_REFERENCE_TSC_AVAILABLE
);
287 msr
->idx
= HV_X64_MSR_REFERENCE_TSC
;
289 msr
->fault_expected
= false;
292 msr
->idx
= HV_X64_MSR_REFERENCE_TSC
;
295 msr
->fault_expected
= false;
/* SynIC (EOM) stages. */
299 msr
->idx
= HV_X64_MSR_EOM
;
301 msr
->fault_expected
= true;
305 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
306 * capability enabled and guest visible CPUID bit unset.
308 msr
->idx
= HV_X64_MSR_EOM
;
310 msr
->fault_expected
= true;
313 vcpu_set_cpuid_feature(vcpu
, HV_MSR_SYNIC_AVAILABLE
);
314 msr
->idx
= HV_X64_MSR_EOM
;
316 msr
->fault_expected
= false;
319 msr
->idx
= HV_X64_MSR_EOM
;
322 msr
->fault_expected
= false;
/* Synthetic timer (STIMER0) stages. */
326 msr
->idx
= HV_X64_MSR_STIMER0_CONFIG
;
328 msr
->fault_expected
= true;
331 vcpu_set_cpuid_feature(vcpu
, HV_MSR_SYNTIMER_AVAILABLE
);
332 msr
->idx
= HV_X64_MSR_STIMER0_CONFIG
;
334 msr
->fault_expected
= false;
337 msr
->idx
= HV_X64_MSR_STIMER0_CONFIG
;
340 msr
->fault_expected
= false;
343 /* Direct mode test */
344 msr
->idx
= HV_X64_MSR_STIMER0_CONFIG
;
/* Bit 12 is the direct-mode bit in the stimer config value. */
346 msr
->write_val
= 1 << 12;
347 msr
->fault_expected
= true;
350 vcpu_set_cpuid_feature(vcpu
, HV_STIMER_DIRECT_MODE_AVAILABLE
);
351 msr
->idx
= HV_X64_MSR_STIMER0_CONFIG
;
353 msr
->write_val
= 1 << 12;
354 msr
->fault_expected
= false;
/* APIC access MSRs (EOI is write-only, see is_write_only_msr()). */
358 msr
->idx
= HV_X64_MSR_EOI
;
360 msr
->fault_expected
= true;
363 vcpu_set_cpuid_feature(vcpu
, HV_MSR_APIC_ACCESS_AVAILABLE
);
364 msr
->idx
= HV_X64_MSR_EOI
;
367 msr
->fault_expected
= false;
/* Frequency MSRs. */
371 msr
->idx
= HV_X64_MSR_TSC_FREQUENCY
;
373 msr
->fault_expected
= true;
376 vcpu_set_cpuid_feature(vcpu
, HV_ACCESS_FREQUENCY_MSRS
);
377 msr
->idx
= HV_X64_MSR_TSC_FREQUENCY
;
379 msr
->fault_expected
= false;
383 msr
->idx
= HV_X64_MSR_TSC_FREQUENCY
;
386 msr
->fault_expected
= true;
/* Reenlightenment MSRs. */
390 msr
->idx
= HV_X64_MSR_REENLIGHTENMENT_CONTROL
;
392 msr
->fault_expected
= true;
395 vcpu_set_cpuid_feature(vcpu
, HV_ACCESS_REENLIGHTENMENT
);
396 msr
->idx
= HV_X64_MSR_REENLIGHTENMENT_CONTROL
;
398 msr
->fault_expected
= false;
401 msr
->idx
= HV_X64_MSR_REENLIGHTENMENT_CONTROL
;
404 msr
->fault_expected
= false;
407 /* Can only write '0' */
408 msr
->idx
= HV_X64_MSR_TSC_EMULATION_STATUS
;
411 msr
->fault_expected
= true;
/* Guest crash MSRs. */
415 msr
->idx
= HV_X64_MSR_CRASH_P0
;
417 msr
->fault_expected
= true;
420 vcpu_set_cpuid_feature(vcpu
, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE
);
421 msr
->idx
= HV_X64_MSR_CRASH_P0
;
423 msr
->fault_expected
= false;
426 msr
->idx
= HV_X64_MSR_CRASH_P0
;
429 msr
->fault_expected
= false;
/* Syndbg MSRs need both the debug-MSR feature and the syndbg cap. */
433 msr
->idx
= HV_X64_MSR_SYNDBG_STATUS
;
435 msr
->fault_expected
= true;
438 vcpu_set_cpuid_feature(vcpu
, HV_FEATURE_DEBUG_MSRS_AVAILABLE
);
439 vcpu_set_cpuid_feature(vcpu
, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING
);
440 msr
->idx
= HV_X64_MSR_SYNDBG_STATUS
;
442 msr
->fault_expected
= false;
445 msr
->idx
= HV_X64_MSR_SYNDBG_STATUS
;
448 msr
->fault_expected
= false;
/* Invariant TSC stages (gated by has_invtsc — guard not visible here). */
452 /* MSR is not available when CPUID feature bit is unset */
455 msr
->idx
= HV_X64_MSR_TSC_INVARIANT_CONTROL
;
457 msr
->fault_expected
= true;
460 /* MSR is available when CPUID feature bit is set */
463 vcpu_set_cpuid_feature(vcpu
, HV_ACCESS_TSC_INVARIANT
);
464 msr
->idx
= HV_X64_MSR_TSC_INVARIANT_CONTROL
;
466 msr
->fault_expected
= false;
469 /* Writing bits other than 0 is forbidden */
472 msr
->idx
= HV_X64_MSR_TSC_INVARIANT_CONTROL
;
474 msr
->write_val
= 0xdeadbeef;
475 msr
->fault_expected
= true;
478 /* Setting bit 0 enables the feature */
481 msr
->idx
= HV_X64_MSR_TSC_INVARIANT_CONTROL
;
484 msr
->fault_expected
= false;
/* Per-stage epilogue: push the updated CPUID to KVM, refresh the
 * snapshot, run the vCPU and process its ucall. 'stage' and 'uc' are
 * declared in lines not visible here. */
492 vcpu_set_cpuid(vcpu
);
494 memcpy(prev_cpuid
, vcpu
->cpuid
, kvm_cpuid2_size(vcpu
->cpuid
->nent
));
496 pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage
,
497 msr
->idx
, msr
->write
? "write" : "read");
500 TEST_ASSERT_KVM_EXIT_REASON(vcpu
, KVM_EXIT_IO
);
502 switch (get_ucall(vcpu
, &uc
)) {
504 REPORT_GUEST_ASSERT(uc
);
509 TEST_FAIL("Unhandled ucall: %ld", uc
.cmd
);
/*
 * Host-side driver for the hypercall tests: creates a VM running
 * guest_hcall(), then walks through stages, each programming *hcall
 * (shared with the guest) with a control value and the expected Hyper-V
 * status (or #UD), flipping CPUID feature bits between stages.
 *
 * NOTE(review): block appears garbled by extraction — the stage loop,
 * 'case' labels, vcpu_run() calls and several declarations ('vm',
 * 'stage', 'uc') are not visible here. TODO: confirm against the
 * original file; do not compile as-is.
 */
519 static void guest_test_hcalls_access(void)
521 struct kvm_cpuid2
*prev_cpuid
= NULL
;
522 struct kvm_vcpu
*vcpu
;
526 vm_vaddr_t hcall_page
, hcall_params
;
527 struct hcall_data
*hcall
;
530 vm
= vm_create_with_one_vcpu(&vcpu
, guest_hcall
);
532 /* Hypercall input/output */
533 hcall_page
= vm_vaddr_alloc_pages(vm
, 2);
534 memset(addr_gva2hva(vm
, hcall_page
), 0x0, 2 * getpagesize());
/* Page holding the shared hcall_data parameters. */
536 hcall_params
= vm_vaddr_alloc_page(vm
);
537 memset(addr_gva2hva(vm
, hcall_params
), 0x0, getpagesize());
538 hcall
= addr_gva2hva(vm
, hcall_params
);
/* Guest gets the hypercall page GPA and the params GVA. */
540 vcpu_args_set(vcpu
, 2, addr_gva2gpa(vm
, hcall_page
), hcall_params
);
541 vcpu_enable_cap(vcpu
, KVM_CAP_HYPERV_ENFORCE_CPUID
, 1);
544 vcpu_reset_hv_cpuid(vcpu
);
/* Snapshot CPUID so later stages can detect/restore changes. */
546 prev_cpuid
= allocate_kvm_cpuid2(vcpu
->cpuid
->nent
);
548 vcpu_init_cpuid(vcpu
, prev_cpuid
);
/* Unknown hypercall code with the interface enabled. */
553 vcpu_set_cpuid_feature(vcpu
, HV_MSR_HYPERCALL_AVAILABLE
);
554 hcall
->control
= 0xbeef;
555 hcall
->expect
= HV_STATUS_INVALID_HYPERCALL_CODE
;
/* POST_MESSAGE: denied without HV_POST_MESSAGES, then input-checked. */
559 hcall
->control
= HVCALL_POST_MESSAGE
;
560 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
563 vcpu_set_cpuid_feature(vcpu
, HV_POST_MESSAGES
);
564 hcall
->control
= HVCALL_POST_MESSAGE
;
565 hcall
->expect
= HV_STATUS_INVALID_HYPERCALL_INPUT
;
/* SIGNAL_EVENT: same pattern with HV_SIGNAL_EVENTS. */
569 hcall
->control
= HVCALL_SIGNAL_EVENT
;
570 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
573 vcpu_set_cpuid_feature(vcpu
, HV_SIGNAL_EVENTS
);
574 hcall
->control
= HVCALL_SIGNAL_EVENT
;
575 hcall
->expect
= HV_STATUS_INVALID_HYPERCALL_INPUT
;
/* RESET_DEBUG_SESSION: needs both the syndbg cap and HV_DEBUGGING. */
579 hcall
->control
= HVCALL_RESET_DEBUG_SESSION
;
580 hcall
->expect
= HV_STATUS_INVALID_HYPERCALL_CODE
;
583 vcpu_set_cpuid_feature(vcpu
, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING
);
584 hcall
->control
= HVCALL_RESET_DEBUG_SESSION
;
585 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
588 vcpu_set_cpuid_feature(vcpu
, HV_DEBUGGING
);
589 hcall
->control
= HVCALL_RESET_DEBUG_SESSION
;
590 hcall
->expect
= HV_STATUS_OPERATION_DENIED
;
/* TLB flush hypercalls, plain and EX variants. */
594 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE
;
595 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
598 vcpu_set_cpuid_feature(vcpu
, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED
);
599 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE
;
600 hcall
->expect
= HV_STATUS_SUCCESS
;
603 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX
;
604 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
607 vcpu_set_cpuid_feature(vcpu
, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED
);
608 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX
;
609 hcall
->expect
= HV_STATUS_SUCCESS
;
/* SEND_IPI / SEND_IPI_EX. */
613 hcall
->control
= HVCALL_SEND_IPI
;
614 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
617 vcpu_set_cpuid_feature(vcpu
, HV_X64_CLUSTER_IPI_RECOMMENDED
);
618 hcall
->control
= HVCALL_SEND_IPI
;
619 hcall
->expect
= HV_STATUS_INVALID_HYPERCALL_INPUT
;
622 /* Nothing in 'sparse banks' -> success */
623 hcall
->control
= HVCALL_SEND_IPI_EX
;
624 hcall
->expect
= HV_STATUS_SUCCESS
;
/* NOTIFY_LONG_SPIN_WAIT, gated by the PV-spinlocks enlightenment bit. */
628 hcall
->control
= HVCALL_NOTIFY_LONG_SPIN_WAIT
;
629 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
632 vcpu_set_cpuid_feature(vcpu
, HV_PV_SPINLOCKS_TEST
);
633 hcall
->control
= HVCALL_NOTIFY_LONG_SPIN_WAIT
;
634 hcall
->expect
= HV_STATUS_SUCCESS
;
637 /* XMM fast hypercall */
638 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE
| HV_HYPERCALL_FAST_BIT
;
639 hcall
->ud_expected
= true;
642 vcpu_set_cpuid_feature(vcpu
, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE
);
643 hcall
->control
= HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE
| HV_HYPERCALL_FAST_BIT
;
644 hcall
->ud_expected
= false;
645 hcall
->expect
= HV_STATUS_SUCCESS
;
/* Extended hypercalls. */
648 hcall
->control
= HV_EXT_CALL_QUERY_CAPABILITIES
;
649 hcall
->expect
= HV_STATUS_ACCESS_DENIED
;
652 vcpu_set_cpuid_feature(vcpu
, HV_ENABLE_EXTENDED_HYPERCALLS
);
653 hcall
->control
= HV_EXT_CALL_QUERY_CAPABILITIES
| HV_HYPERCALL_FAST_BIT
;
654 hcall
->expect
= HV_STATUS_INVALID_PARAMETER
;
/* Per-stage epilogue: push CPUID, refresh snapshot, run the vCPU and
 * process its ucall. 'stage' and 'uc' are declared in lines not
 * visible here. */
661 vcpu_set_cpuid(vcpu
);
663 memcpy(prev_cpuid
, vcpu
->cpuid
, kvm_cpuid2_size(vcpu
->cpuid
->nent
));
665 pr_debug("Stage %d: testing hcall: 0x%lx\n", stage
, hcall
->control
);
668 TEST_ASSERT_KVM_EXIT_REASON(vcpu
, KVM_EXIT_IO
);
670 switch (get_ucall(vcpu
, &uc
)) {
672 REPORT_GUEST_ASSERT(uc
);
677 TEST_FAIL("Unhandled ucall: %ld", uc
.cmd
);
/*
 * Test entry sequence (the enclosing main() header is outside this view):
 * require the KVM capability both sub-tests depend on, then run the MSR
 * suite followed by the hypercall suite.
 */
688 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENFORCE_CPUID
));
690 pr_info("Testing access to Hyper-V specific MSRs\n");
691 guest_test_msrs_access();
693 pr_info("Testing access to Hyper-V hypercalls\n");
694 guest_test_hcalls_access();