// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure that VM-Enter after migration doesn't incorrectly
 * restart the timer with the full timer value instead of the partially
 * decayed timer value.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#define PREEMPTION_TIMER_VALUE			100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1	 80000000ull
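
/*
 * Per the Intel SDM, the VMX-preemption timer counts down at a rate of
 * TSC >> vmx_pt_rate (vmx_pt_rate is read from IA32_VMX_MISC below), so a
 * timer armed with value V at TSC time T expires at roughly
 * T + (V << vmx_pt_rate) in TSC units.  The l1/l2 tsc_deadline values used
 * by this test are computed from exactly that identity.
 */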
u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;
void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

	/*
	 * Force L2 through Save and Restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;

	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}
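
/*
 * L1 flow: launch L2, consume the initial VMCALL exit, enable the
 * preemption timer with PREEMPTION_TIMER_VALUE, then vmresume.  The next
 * exit back to L1 should be EXIT_REASON_PREEMPTION_TIMER, and it should
 * arrive only after the host save/restore cycle that L2's GUEST_SYNC(1)
 * above triggers.
 */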
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	volatile u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	/*
	 * Check for Preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);

	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

	/*
	 * Turn on PIN control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));
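
	/*
	 * Bits 4:0 of IA32_VMX_MISC encode the preemption timer rate: the
	 * timer ticks once every 2^vmx_pt_rate TSC cycles, i.e. it counts
	 * down at TSC >> vmx_pt_rate frequency.
	 */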
	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();
	/*
	 * Ensure exit from L2 happens after L2 goes through
	 * L1 save and restore
	 */
	GUEST_ASSERT(l2_save_restore_done);

	/*
	 * Ensure the exit from L2 is due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

	l1_tsc_deadline = l1_vmx_pt_start +
			  (PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
			  (PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	/*
	 * Sync with the host and pass the l1|l2 pt_expiry_finish times and
	 * tsc deadlines so that host can verify they are as expected
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
			l2_vmx_pt_finish, l2_tsc_deadline);
}
void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}
int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_vcpu *vcpu;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;
	/*
	 * AMD currently does not implement any VMX features, so for now we
	 * just early out.
	 */
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_NESTED_STATE));
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

	vcpu_regs_get(vcpu, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vcpu, 1, vmx_pages_gva);
	for (stage = 1;; stage++) {
		vcpu_run(vcpu);
		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
		case UCALL_ABORT:
			REPORT_GUEST_ASSERT(uc);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);
		/*
		 * If this is stage 2 then we should verify the vmx pt expiry.
		 * From L1's perspective verify Preemption timer hasn't
		 * expired too early: the observed expiry TSC must be at or
		 * past L1's computed deadline.
		 * From L2's perspective verify Preemption timer hasn't
		 * expired too late: L2's last TSC read must still be below
		 * L2's computed deadline, i.e. the timer kept its partially
		 * decayed value across save/restore instead of being
		 * restarted with the full value.
		 */
		if (stage == 2) {
			pr_info("Stage %d: L1 PT expiry TSC (%lu) , L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu) , L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);

			TEST_ASSERT(uc.args[2] >= uc.args[3],
				    "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				    stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				    "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
				    stage, uc.args[4], uc.args[5]);
		}
		state = vcpu_save_state(vcpu);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vcpu, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		vcpu = vm_recreate_with_one_vcpu(vm);
		vcpu_load_state(vcpu, state);
		kvm_x86_state_cleanup(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vcpu, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}