Source: tools/testing/selftests/kvm/x86_64/hyperv_features.c
[drm/drm-misc.git gitweb view] blob 068e9c69710d2e05f4561ddf4e49efc34e1b6e34
(Note: the "accel/qaic: Add AIC200 support" banner captured above this blob is an unrelated commit subject from the gitweb page header, not part of this file.)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2021, Red Hat, Inc.
5 * Tests for Hyper-V features enablement
6 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <stdint.h>
11 #include "test_util.h"
12 #include "kvm_util.h"
13 #include "processor.h"
14 #include "hyperv.h"
/*
 * HYPERV_CPUID_ENLIGHTMENT_INFO.EBX is not a 'feature' CPUID leaf
 * but to activate the feature it is sufficient to set it to a non-zero
 * value. Use BIT(0) for that.
 */
#define HV_PV_SPINLOCKS_TEST \
	KVM_X86_CPU_FEATURE(HYPERV_CPUID_ENLIGHTMENT_INFO, 0, EBX, 0)
/* Per-stage MSR test parameters, written by the host and read by the guest. */
struct msr_data {
	uint32_t idx;		/* MSR index to access */
	bool fault_expected;	/* expect #GP on the access */
	bool write;		/* true: WRMSR write_val; false: RDMSR */
	u64 write_val;		/* value to write when 'write' is set */
};
/* Per-stage hypercall test parameters, written by the host and read by the guest. */
struct hcall_data {
	uint64_t control;	/* hypercall control input value */
	uint64_t expect;	/* expected hypercall result/status */
	bool ud_expected;	/* expect #UD instead of a completed hypercall */
};
37 static bool is_write_only_msr(uint32_t msr)
39 return msr == HV_X64_MSR_EOI;
42 static void guest_msr(struct msr_data *msr)
44 uint8_t vector = 0;
45 uint64_t msr_val = 0;
47 GUEST_ASSERT(msr->idx);
49 if (msr->write)
50 vector = wrmsr_safe(msr->idx, msr->write_val);
52 if (!vector && (!msr->write || !is_write_only_msr(msr->idx)))
53 vector = rdmsr_safe(msr->idx, &msr_val);
55 if (msr->fault_expected)
56 __GUEST_ASSERT(vector == GP_VECTOR,
57 "Expected #GP on %sMSR(0x%x), got vector '0x%x'",
58 msr->write ? "WR" : "RD", msr->idx, vector);
59 else
60 __GUEST_ASSERT(!vector,
61 "Expected success on %sMSR(0x%x), got vector '0x%x'",
62 msr->write ? "WR" : "RD", msr->idx, vector);
64 if (vector || is_write_only_msr(msr->idx))
65 goto done;
67 if (msr->write)
68 __GUEST_ASSERT(!vector,
69 "WRMSR(0x%x) to '0x%lx', RDMSR read '0x%lx'",
70 msr->idx, msr->write_val, msr_val);
72 /* Invariant TSC bit appears when TSC invariant control MSR is written to */
73 if (msr->idx == HV_X64_MSR_TSC_INVARIANT_CONTROL) {
74 if (!this_cpu_has(HV_ACCESS_TSC_INVARIANT))
75 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC));
76 else
77 GUEST_ASSERT(this_cpu_has(X86_FEATURE_INVTSC) ==
78 !!(msr_val & HV_INVARIANT_TSC_EXPOSED));
81 done:
82 GUEST_DONE();
85 static void guest_hcall(vm_vaddr_t pgs_gpa, struct hcall_data *hcall)
87 u64 res, input, output;
88 uint8_t vector;
90 GUEST_ASSERT_NE(hcall->control, 0);
92 wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
93 wrmsr(HV_X64_MSR_HYPERCALL, pgs_gpa);
95 if (!(hcall->control & HV_HYPERCALL_FAST_BIT)) {
96 input = pgs_gpa;
97 output = pgs_gpa + 4096;
98 } else {
99 input = output = 0;
102 vector = __hyperv_hypercall(hcall->control, input, output, &res);
103 if (hcall->ud_expected) {
104 __GUEST_ASSERT(vector == UD_VECTOR,
105 "Expected #UD for control '%lu', got vector '0x%x'",
106 hcall->control, vector);
107 } else {
108 __GUEST_ASSERT(!vector,
109 "Expected no exception for control '%lu', got vector '0x%x'",
110 hcall->control, vector);
111 GUEST_ASSERT_EQ(res, hcall->expect);
114 GUEST_DONE();
/*
 * Put the vCPU's Hyper-V CPUID into a known baseline state so each test
 * stage can re-enable features one at a time.
 */
static void vcpu_reset_hv_cpuid(struct kvm_vcpu *vcpu)
{
	/*
	 * Enable all supported Hyper-V features, then clear the leafs holding
	 * the features that will be tested one by one.
	 */
	vcpu_set_hv_cpuid(vcpu);

	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_FEATURES);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_ENLIGHTMENT_INFO);
	vcpu_clear_cpuid_entry(vcpu, HYPERV_CPUID_SYNDBG_PLATFORM_CAPABILITIES);
}
/*
 * Host-side driver for the MSR tests: runs ~48 stages, each creating a
 * fresh VM whose guest performs one MSR access (see guest_msr()).  CPUID
 * feature bits set in earlier stages are carried forward via prev_cpuid,
 * so each MSR is first shown to #GP without its feature bit and then to
 * succeed once the bit is set.
 */
static void guest_test_msrs_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t msr_gva;
	struct msr_data *msr;
	bool has_invtsc = kvm_cpu_has(X86_FEATURE_INVTSC);

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_msr);

		msr_gva = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, msr_gva), 0x0, getpagesize());
		msr = addr_gva2hva(vm, msr_gva);

		vcpu_args_set(vcpu, 1, msr_gva);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		/* TODO: Make this entire test easier to maintain. */
		if (stage >= 21)
			vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_SYNIC2, 0);

		switch (stage) {
		case 0:
			/*
			 * Only available when Hyper-V identification is set
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 1:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			/*
			 * HV_X64_MSR_GUEST_OS_ID has to be written first to make
			 * HV_X64_MSR_HYPERCALL available.
			 */
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = true;
			msr->write_val = HYPERV_LINUX_OS_ID;
			msr->fault_expected = false;
			break;
		case 3:
			msr->idx = HV_X64_MSR_GUEST_OS_ID;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 4:
			msr->idx = HV_X64_MSR_HYPERCALL;
			msr->write = false;
			msr->fault_expected = false;
			break;

		case 5:
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_RUNTIME_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 7:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_RUNTIME;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 8:
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_TIME_REF_COUNT_AVAILABLE);
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 10:
			/* Read only */
			msr->idx = HV_X64_MSR_TIME_REF_COUNT;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 11:
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 12:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_VP_INDEX_AVAILABLE);
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 13:
			/* Read only */
			msr->idx = HV_X64_MSR_VP_INDEX;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 14:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 15:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_RESET_AVAILABLE);
			msr->idx = HV_X64_MSR_RESET;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 16:
			msr->idx = HV_X64_MSR_RESET;
			msr->write = true;
			/*
			 * TODO: the test only writes '0' to HV_X64_MSR_RESET
			 * at the moment, writing some other value there will
			 * trigger real vCPU reset and the code is not prepared
			 * to handle it yet.
			 */
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 17:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_REFERENCE_TSC_AVAILABLE);
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 19:
			msr->idx = HV_X64_MSR_REFERENCE_TSC;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 20:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 21:
			/*
			 * Remains unavailable even with KVM_CAP_HYPERV_SYNIC2
			 * capability enabled and guest visible CPUID bit unset.
			 */
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 22:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNIC_AVAILABLE);
			msr->idx = HV_X64_MSR_EOM;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 23:
			msr->idx = HV_X64_MSR_EOM;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 24:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 25:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_SYNTIMER_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 26:
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;
		case 27:
			/* Direct mode test */
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = true;
			break;
		case 28:
			vcpu_set_cpuid_feature(vcpu, HV_STIMER_DIRECT_MODE_AVAILABLE);
			msr->idx = HV_X64_MSR_STIMER0_CONFIG;
			msr->write = true;
			msr->write_val = 1 << 12;
			msr->fault_expected = false;
			break;

		case 29:
			msr->idx = HV_X64_MSR_EOI;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 30:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_APIC_ACCESS_AVAILABLE);
			msr->idx = HV_X64_MSR_EOI;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 31:
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 32:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_FREQUENCY_MSRS);
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 33:
			/* Read only */
			msr->idx = HV_X64_MSR_TSC_FREQUENCY;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 34:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 35:
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_REENLIGHTENMENT);
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 36:
			msr->idx = HV_X64_MSR_REENLIGHTENMENT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;
		case 37:
			/* Can only write '0' */
			msr->idx = HV_X64_MSR_TSC_EMULATION_STATUS;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = true;
			break;

		case 38:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 39:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_GUEST_CRASH_MSR_AVAILABLE);
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 40:
			msr->idx = HV_X64_MSR_CRASH_P0;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		case 41:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 42:
			vcpu_set_cpuid_feature(vcpu, HV_FEATURE_DEBUG_MSRS_AVAILABLE);
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 43:
			msr->idx = HV_X64_MSR_SYNDBG_STATUS;
			msr->write = true;
			msr->write_val = 0;
			msr->fault_expected = false;
			break;

		case 44:
			/* MSR is not available when CPUID feature bit is unset */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = true;
			break;
		case 45:
			/* MSR is available when CPUID feature bit is set */
			if (!has_invtsc)
				goto next_stage;
			vcpu_set_cpuid_feature(vcpu, HV_ACCESS_TSC_INVARIANT);
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = false;
			msr->fault_expected = false;
			break;
		case 46:
			/* Writing bits other than 0 is forbidden */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 0xdeadbeef;
			msr->fault_expected = true;
			break;
		case 47:
			/* Setting bit 0 enables the feature */
			if (!has_invtsc)
				goto next_stage;
			msr->idx = HV_X64_MSR_TSC_INVARIANT_CONTROL;
			msr->write = true;
			msr->write_val = 1;
			msr->fault_expected = false;
			break;

		default:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Carry the accumulated CPUID forward to the next stage's VM. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing msr: 0x%x for %s\n", stage,
			 msr->idx, msr->write ? "write" : "read");

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

next_stage:
		stage++;
		kvm_vm_free(vm);
	}
}
/*
 * Host-side driver for the hypercall tests: runs 21 stages, each creating
 * a fresh VM whose guest issues one hypercall (see guest_hcall()).  CPUID
 * feature bits set in earlier stages are carried forward via prev_cpuid,
 * so each hypercall is first shown to be denied without its feature bit
 * and then to progress further once the bit is set.
 */
static void guest_test_hcalls_access(void)
{
	struct kvm_cpuid2 *prev_cpuid = NULL;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	struct ucall uc;
	int stage = 0;
	vm_vaddr_t hcall_page, hcall_params;
	struct hcall_data *hcall;

	while (true) {
		vm = vm_create_with_one_vcpu(&vcpu, guest_hcall);

		/* Hypercall input/output */
		hcall_page = vm_vaddr_alloc_pages(vm, 2);
		memset(addr_gva2hva(vm, hcall_page), 0x0, 2 * getpagesize());

		hcall_params = vm_vaddr_alloc_page(vm);
		memset(addr_gva2hva(vm, hcall_params), 0x0, getpagesize());
		hcall = addr_gva2hva(vm, hcall_params);

		vcpu_args_set(vcpu, 2, addr_gva2gpa(vm, hcall_page), hcall_params);
		vcpu_enable_cap(vcpu, KVM_CAP_HYPERV_ENFORCE_CPUID, 1);

		if (!prev_cpuid) {
			vcpu_reset_hv_cpuid(vcpu);

			prev_cpuid = allocate_kvm_cpuid2(vcpu->cpuid->nent);
		} else {
			vcpu_init_cpuid(vcpu, prev_cpuid);
		}

		switch (stage) {
		case 0:
			vcpu_set_cpuid_feature(vcpu, HV_MSR_HYPERCALL_AVAILABLE);
			hcall->control = 0xbeef;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;

		case 1:
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 2:
			vcpu_set_cpuid_feature(vcpu, HV_POST_MESSAGES);
			hcall->control = HVCALL_POST_MESSAGE;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 3:
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 4:
			vcpu_set_cpuid_feature(vcpu, HV_SIGNAL_EVENTS);
			hcall->control = HVCALL_SIGNAL_EVENT;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;

		case 5:
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_CODE;
			break;
		case 6:
			vcpu_set_cpuid_feature(vcpu, HV_X64_SYNDBG_CAP_ALLOW_KERNEL_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 7:
			vcpu_set_cpuid_feature(vcpu, HV_DEBUGGING);
			hcall->control = HVCALL_RESET_DEBUG_SESSION;
			hcall->expect = HV_STATUS_OPERATION_DENIED;
			break;

		case 8:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 9:
			vcpu_set_cpuid_feature(vcpu, HV_X64_REMOTE_TLB_FLUSH_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 10:
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 11:
			vcpu_set_cpuid_feature(vcpu, HV_X64_EX_PROCESSOR_MASKS_RECOMMENDED);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 12:
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 13:
			vcpu_set_cpuid_feature(vcpu, HV_X64_CLUSTER_IPI_RECOMMENDED);
			hcall->control = HVCALL_SEND_IPI;
			hcall->expect = HV_STATUS_INVALID_HYPERCALL_INPUT;
			break;
		case 14:
			/* Nothing in 'sparse banks' -> success */
			hcall->control = HVCALL_SEND_IPI_EX;
			hcall->expect = HV_STATUS_SUCCESS;
			break;

		case 15:
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 16:
			vcpu_set_cpuid_feature(vcpu, HV_PV_SPINLOCKS_TEST);
			hcall->control = HVCALL_NOTIFY_LONG_SPIN_WAIT;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 17:
			/* XMM fast hypercall */
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = true;
			break;
		case 18:
			vcpu_set_cpuid_feature(vcpu, HV_X64_HYPERCALL_XMM_INPUT_AVAILABLE);
			hcall->control = HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE | HV_HYPERCALL_FAST_BIT;
			hcall->ud_expected = false;
			hcall->expect = HV_STATUS_SUCCESS;
			break;
		case 19:
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES;
			hcall->expect = HV_STATUS_ACCESS_DENIED;
			break;
		case 20:
			vcpu_set_cpuid_feature(vcpu, HV_ENABLE_EXTENDED_HYPERCALLS);
			hcall->control = HV_EXT_CALL_QUERY_CAPABILITIES | HV_HYPERCALL_FAST_BIT;
			hcall->expect = HV_STATUS_INVALID_PARAMETER;
			break;
		case 21:
			kvm_vm_free(vm);
			return;
		}

		vcpu_set_cpuid(vcpu);

		/* Carry the accumulated CPUID forward to the next stage's VM. */
		memcpy(prev_cpuid, vcpu->cpuid, kvm_cpuid2_size(vcpu->cpuid->nent));

		pr_debug("Stage %d: testing hcall: 0x%lx\n", stage, hcall->control);

		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			return;
		case UCALL_DONE:
			break;
		default:
			TEST_FAIL("Unhandled ucall: %ld", uc.cmd);
			return;
		}

		stage++;
		kvm_vm_free(vm);
	}
}
686 int main(void)
688 TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_ENFORCE_CPUID));
690 pr_info("Testing access to Hyper-V specific MSRs\n");
691 guest_test_msrs_access();
693 pr_info("Testing access to Hyper-V hypercalls\n");
694 guest_test_hcalls_access();