// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * The first subtest simply checks to see that an L2 guest can be
 * launched with a valid APIC-access address that is backed by a
 * page of L1 physical memory.
 *
 * The second subtest sets the APIC-access address to a (valid) L1
 * physical address that is not backed by memory. KVM can't handle
 * this situation, so resuming L2 should result in a KVM exit for
 * internal error (emulation). This is not an architectural
 * requirement. It is just a shortcoming of KVM. The internal error
 * is unfortunate, but it's better than what used to happen!
 */
#include "test_util.h"
#include "processor.h"

#include <sys/ioctl.h>

#include "kselftest.h"
/*
 * The virtual machine object.  File-scope so main() and any helpers
 * share the same handle; created once in main().
 */
static struct kvm_vm *vm;
/*
 * L2 guest body: does nothing but immediately exit back to L1 with a
 * VMCALL, giving L1 a deterministic EXIT_REASON_VMCALL to assert on.
 */
static void l2_guest_code(void)
{
	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}
42 static void l1_guest_code(struct vmx_pages
*vmx_pages
, unsigned long high_gpa
)
44 #define L2_GUEST_STACK_SIZE 64
45 unsigned long l2_guest_stack
[L2_GUEST_STACK_SIZE
];
48 GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages
));
49 GUEST_ASSERT(load_vmcs(vmx_pages
));
51 /* Prepare the VMCS for L2 execution. */
52 prepare_vmcs(vmx_pages
, l2_guest_code
,
53 &l2_guest_stack
[L2_GUEST_STACK_SIZE
]);
54 control
= vmreadz(CPU_BASED_VM_EXEC_CONTROL
);
55 control
|= CPU_BASED_ACTIVATE_SECONDARY_CONTROLS
;
56 vmwrite(CPU_BASED_VM_EXEC_CONTROL
, control
);
57 control
= vmreadz(SECONDARY_VM_EXEC_CONTROL
);
58 control
|= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES
;
59 vmwrite(SECONDARY_VM_EXEC_CONTROL
, control
);
60 vmwrite(APIC_ACCESS_ADDR
, vmx_pages
->apic_access_gpa
);
62 /* Try to launch L2 with the memory-backed APIC-access address. */
63 GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR
));
64 GUEST_ASSERT(!vmlaunch());
65 GUEST_ASSERT(vmreadz(VM_EXIT_REASON
) == EXIT_REASON_VMCALL
);
67 vmwrite(APIC_ACCESS_ADDR
, high_gpa
);
69 /* Try to resume L2 with the unbacked APIC-access address. */
70 GUEST_SYNC(vmreadz(APIC_ACCESS_ADDR
));
71 GUEST_ASSERT(!vmresume());
72 GUEST_ASSERT(vmreadz(VM_EXIT_REASON
) == EXIT_REASON_VMCALL
);
77 int main(int argc
, char *argv
[])
79 unsigned long apic_access_addr
= ~0ul;
80 unsigned int paddr_width
;
81 unsigned int vaddr_width
;
82 vm_vaddr_t vmx_pages_gva
;
83 unsigned long high_gpa
;
84 struct vmx_pages
*vmx
;
87 nested_vmx_check_supported();
89 vm
= vm_create_default(VCPU_ID
, 0, (void *) l1_guest_code
);
91 kvm_get_cpu_address_width(&paddr_width
, &vaddr_width
);
92 high_gpa
= (1ul << paddr_width
) - getpagesize();
93 if ((unsigned long)DEFAULT_GUEST_PHY_PAGES
* getpagesize() > high_gpa
) {
94 print_skip("No unbacked physical page available");
98 vmx
= vcpu_alloc_vmx(vm
, &vmx_pages_gva
);
99 prepare_virtualize_apic_accesses(vmx
, vm
, 0);
100 vcpu_args_set(vm
, VCPU_ID
, 2, vmx_pages_gva
, high_gpa
);
103 volatile struct kvm_run
*run
= vcpu_state(vm
, VCPU_ID
);
106 vcpu_run(vm
, VCPU_ID
);
107 if (apic_access_addr
== high_gpa
) {
108 TEST_ASSERT(run
->exit_reason
==
109 KVM_EXIT_INTERNAL_ERROR
,
110 "Got exit reason other than KVM_EXIT_INTERNAL_ERROR: %u (%s)\n",
112 exit_reason_str(run
->exit_reason
));
113 TEST_ASSERT(run
->internal
.suberror
==
114 KVM_INTERNAL_ERROR_EMULATION
,
115 "Got internal suberror other than KVM_INTERNAL_ERROR_EMULATION: %u\n",
116 run
->internal
.suberror
);
119 TEST_ASSERT(run
->exit_reason
== KVM_EXIT_IO
,
120 "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
122 exit_reason_str(run
->exit_reason
));
124 switch (get_ucall(vm
, VCPU_ID
, &uc
)) {
126 TEST_FAIL("%s at %s:%ld", (const char *)uc
.args
[0],
127 __FILE__
, uc
.args
[1]);
130 apic_access_addr
= uc
.args
[1];
136 TEST_ASSERT(false, "Unknown ucall %lu", uc
.cmd
);