// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest simply checks to see that an L2 guest can be
 * launched with a valid APIC-access address that is backed by a
 * page of L1 physical memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement. It is just a shortcoming of KVM. The internal error
 * is unfortunate, but it's better than what used to happen!
 */
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <sys/ioctl.h>

#include "kselftest.h"
static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
static void l1_guest_code(struct vmx_pages *vmx_pages, unsigned long high_gpa)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
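	/*
	 * "Virtualize APIC accesses" lives in the secondary VM-execution
	 * controls, so the "activate secondary controls" bit must be set in
	 * the primary controls before it takes effect. APIC_ACCESS_ADDR is
	 * then pointed at the memory-backed APIC-access page set up for L1.
	 */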
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	control = vmreadz(SECONDARY_VM_EXEC_CONTROL);
	control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
	vmwrite(SECONDARY_VM_EXEC_CONTROL, control);
	vmwrite(APIC_ACCESS_ADDR, vmx_pages->apic_access_gpa);

	/* Try to launch L2 with the memory-backed APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(APIC_ACCESS_ADDR, high_gpa);

	/* Try to resume L2 with the unbacked APIC-access address. */
	GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	/* Last APIC-access address the guest reported via GUEST_SYNC. */
	unsigned long apic_access_addr = ~0ul;
	vm_vaddr_t vmx_pages_gva;
	unsigned long high_gpa;
	struct vmx_pages *vmx;
	bool done = false;

	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
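
	/*
	 * Use a GPA just below the top of the guest physical address space as
	 * the "unbacked" address for the second subtest; no memslot in this
	 * test maps anything there.
	 */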
	high_gpa = (vm->max_gfn - 1) << vm->page_shift;

	vmx = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	prepare_virtualize_apic_accesses(vmx, vm);
	vcpu_args_set(vcpu, 2, vmx_pages_gva, high_gpa);
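
	/*
	 * The guest reports each APIC-access address via GUEST_SYNC before
	 * entering (or re-entering) L2. Once it has reported the unbacked
	 * high_gpa, the next vcpu_run() is expected to fail with an
	 * emulation internal error instead of a normal ucall exit.
	 */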
	while (!done) {
		volatile struct kvm_run *run = vcpu->run;
		struct ucall uc;

		vcpu_run(vcpu);
		if (apic_access_addr == high_gpa) {
			TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_INTERNAL_ERROR);
			TEST_ASSERT(run->internal.suberror ==
				    KVM_INTERNAL_ERROR_EMULATION,
				    "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u",
				    run->internal.suberror);
			break;
		}
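
		/* Ucalls are reported to the host as port I/O exits. */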
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			apic_access_addr = uc.args[1];
			break;
		case UCALL_DONE:
			done = true;
			break;
		default:
			TEST_ASSERT(false, "Unknown ucall %lu", uc.cmd);
		}
	}

	kvm_vm_free(vm);
	return 0;
}