// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID			5
#define L2_GUEST_STACK_SIZE	256

void svm_l2_guest_code(void)
{
	/* Exit to L1. */
	vmcall();

	/* Done, exit to L1 and never come back. */
	vmcall();
}

static void svm_l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	GUEST_ASSERT(svm->vmcb_gpa);

	/* Prepare for L2 execution. */
	generic_svm_setup(svm, svm_l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
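
	/* Enter L2; it should exit back to L1 with a VMMCALL. */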
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);

	/* Skip over the VMMCALL and re-enter L2. */
	vmcb->save.rip += 3;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
}

void vmx_l2_guest_code(void)
{
	/* Exit to L1. */
	vmcall();

	/* L1 has now set up a shadow VMCS for us. */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));

	/* Done, exit to L1 and never come back. */
	vmcall();
}

static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved. */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);
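
	/* Enable VMCS shadowing and point the link pointer at the shadow VMCS. */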
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
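	/* A shadow VMCS cannot itself be launched or resumed. */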
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
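
	/* VMWRITEs to the current (shadow) VMCS are read back unchanged. */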
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
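
	/* Switch back to the ordinary VMCS; L2 can still be resumed from it. */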
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
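
	/* L2's VMWRITE of 0xc0ffffee was redirected to the shadow VMCS. */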
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

static void __attribute__((__flatten__)) guest_code(void *arg)
{
	if (arg) {
		/* Run whichever nested flavor the vCPU supports. */
		if (cpu_has_svm())
			svm_l1_guest_code(arg);
		else
			vmx_l1_guest_code(arg);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;
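
	/* Create a VM with a single vCPU that runs guest_code. */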
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip nested state checks\n");
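
	/* Pass the nested state area (or 0 if unsupported) to the guest. */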
	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here. */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);
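
		/* Save the full vCPU state, plus the GPRs for a later comparison. */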
		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM. */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);
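
		/* The restored vCPU must report the same GPRs it had when saved. */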
		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);