// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure the VM-Enter after migration doesn't
 * incorrectly restart the timer with the full timer
 * value instead of the partially decayed timer value.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID					5
#define PREEMPTION_TIMER_VALUE			100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull
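/*
 * THRESHOLD1 is 80% of the full timer value: L2 busy-waits until at least
 * this much of the timer has decayed before forcing a save/restore cycle,
 * so a VM-Enter that wrongly re-arms the timer with the full value lands
 * well past the expected deadline and is caught by the host's assertions.
 */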
u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;
union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;
void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;
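	/*
	 * The shift pair above aligns the TSC reading down to the
	 * preemption timer's granularity: the timer ticks once every
	 * 2^vmx_pt_rate TSC cycles, so the low TSC bits never count.
	 */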
	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);
	/*
	 * Force L2 through Save and Restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;
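	/*
	 * The host performs vcpu_save_state()/vcpu_load_state() while
	 * handling the GUEST_SYNC above, so reaching this point means L2
	 * has already survived a full save/restore (migration) cycle.
	 */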
	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	volatile u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;
	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/*
	 * Check for Preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);
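	/*
	 * basic.ctrl (bit 55 of IA32_VMX_BASIC) selects the TRUE_* control
	 * MSRs, which report which default1 controls may really be cleared.
	 * The 'clr' half of each control MSR gives the bits that are allowed
	 * to be 1; the test needs both the pin-based timer control and the
	 * save-timer-value exit control to be settable.
	 */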
	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;
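	/*
	 * Note the early return: on hardware without these controls the
	 * guest falls straight through to GUEST_DONE() in guest_code(), so
	 * the timer checks are silently skipped rather than failed.
	 */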
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));
	/*
	 * Turn on PIN control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));
	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;
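	/*
	 * Bits 4:0 of IA32_VMX_MISC give the rate X at which the preemption
	 * timer counts down: it decrements once every 2^X TSC cycles, which
	 * is why TSC deltas are shifted right by vmx_pt_rate before being
	 * compared against timer values.
	 */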
	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();
	/*
	 * Ensure exit from L2 happens after L2 goes through
	 * the save/restore cycle forced above.
	 */
	GUEST_ASSERT(l2_save_restore_done);
	/*
	 * Ensure the exit from L2 is due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);
	l1_tsc_deadline = l1_vmx_pt_start +
			(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
			(PREEMPTION_TIMER_VALUE << vmx_pt_rate);
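	/*
	 * Each deadline is the arming-time TSC plus the full timer value
	 * converted to TSC cycles.  The timer was armed exactly once, so a
	 * correct save/restore means expiry lands at or after L1's deadline
	 * (no early firing) but before L2's deadline (no re-arm with the
	 * full value at the migration point).
	 */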
	/*
	 * Sync with the host and pass the l1|l2 pt_expiry_finish times and
	 * tsc deadlines so that host can verify they are as expected.
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
			l2_vmx_pt_finish, l2_tsc_deadline);
}
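/*
 * GUEST_SYNC_ARGS(2, ...) surfaces on the host side as UCALL_SYNC with
 * uc.args[1] == 2 (the stage) and the four timestamps in uc.args[2..5],
 * which is where the stage-2 assertions in main() read them from.
 */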
void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}
int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;
	/*
	 * AMD currently does not implement any VMX features, so for now we
	 * just early out if nested VMX is unsupported.
	 */
	nested_vmx_check_supported();
	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		print_skip("KVM_CAP_NESTED_STATE not supported");
		exit(KSFT_SKIP);
	}
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);
	vcpu_regs_get(vm, VCPU_ID, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);
		/*
		 * If this is stage 2 then we should verify the vmx pt expiry
		 * is as expected even after the nested state was saved and
		 * restored.
		 *
		 * From L1's perspective verify Preemption timer hasn't
		 * fired early: the expiry TSC must be at or after the L1
		 * TSC deadline.
		 *
		 * From L2's perspective verify Preemption timer hasn't
		 * been re-armed with the full value across save/restore:
		 * the expiry TSC must still be before the L2 TSC deadline.
		 */
		if (stage == 2) {
			pr_info("Stage %d: L1 PT expiry TSC (%lu), L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu), L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);
			TEST_ASSERT(uc.args[2] >= uc.args[3],
				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				    stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				    "Stage %d: L2 PT expiry TSC (%lu) >= L2 TSC deadline (%lu)",
				    stage, uc.args[4], uc.args[5]);
		}

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);
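		/*
		 * Saving the vCPU and re-loading it into a freshly restarted
		 * VM below emulates a live migration; with
		 * KVM_CAP_NESTED_STATE available, the in-flight VMX state
		 * (including the decayed preemption timer value) travels
		 * through the saved state.
		 */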
		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);
		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}