// SPDX-License-Identifier: GPL-2.0-only
#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define L2_GUEST_STACK_SIZE 256

/*
 * Arbitrary, never shoved into KVM/hardware, just need to avoid conflict with
 * the "real" exceptions used, #SS/#GP/#DF (12/13/8).
 */
#define FAKE_TRIPLE_FAULT_VECTOR	0xaa

/* Arbitrary 32-bit error code injected by this test. */
#define SS_ERROR_CODE 0xdeadbeef

/*
 * Bit '0' is set on Intel if the exception occurs while delivering a previous
 * event/exception.  AMD's wording is ambiguous, but presumably the bit is set
 * if the exception occurs while delivering an external event, e.g. NMI or
 * INTR, but not for exceptions that occur when delivering other exceptions or
 * software interrupts.
 *
 * Note, Intel's name for it, "External event", is misleading and much more
 * aligned with AMD's behavior, but the SDM is quite clear on its behavior.
 */
#define ERROR_CODE_EXT_FLAG	BIT(0)

/*
 * Bit '1' is set if the fault occurred when looking up a descriptor in the
 * IDT, which is the case here as the IDT is empty/NULL.
 */
#define ERROR_CODE_IDT_FLAG	BIT(1)

/*
 * The #GP that occurs when vectoring #SS should show the index into the IDT
 * for #SS, plus have the "IDT flag" set.
 */
#define GP_ERROR_CODE_AMD	((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG)
#define GP_ERROR_CODE_INTEL	((SS_VECTOR * 8) | ERROR_CODE_IDT_FLAG | ERROR_CODE_EXT_FLAG)

/*
 * Intel and AMD both shove '0' into the error code on #DF, regardless of what
 * led to the double fault.
 */
#define DF_ERROR_CODE		0

#define INTERCEPT_SS		(BIT_ULL(SS_VECTOR))
#define INTERCEPT_SS_DF		(INTERCEPT_SS | BIT_ULL(DF_VECTOR))
#define INTERCEPT_SS_GP_DF	(INTERCEPT_SS_DF | BIT_ULL(GP_VECTOR))
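
/*
 * Each L2 "test" function simply reports, via GUEST_SYNC, the exception
 * vector that the next host-queued #SS is expected to surface as in L1
 * (or FAKE_TRIPLE_FAULT_VECTOR when SHUTDOWN/TRIPLE_FAULT is expected).
 */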
static void l2_ss_pending_test(void)
{
	GUEST_SYNC(SS_VECTOR);
}

static void l2_ss_injected_gp_test(void)
{
	GUEST_SYNC(GP_VECTOR);
}

static void l2_ss_injected_df_test(void)
{
	GUEST_SYNC(DF_VECTOR);
}

static void l2_ss_injected_tf_test(void)
{
	GUEST_SYNC(FAKE_TRIPLE_FAULT_VECTOR);
}
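
/*
 * Run L2 at the given code via VMRUN.  Unless a (fake) triple fault is
 * expected, assert that the resulting #VMEXIT is an exception exit for
 * "vector" with the expected error code in EXITINFO1.
 */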
static void svm_run_l2(struct svm_test_data *svm, void *l2_code, int vector,
		       uint32_t error_code)
{
	struct vmcb *vmcb = svm->vmcb;
	struct vmcb_control_area *ctrl = &vmcb->control;

	vmcb->save.rip = (u64)l2_code;
	run_guest(vmcb, svm->vmcb_gpa);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(ctrl->exit_code, (SVM_EXIT_EXCP_BASE + vector));
	GUEST_ASSERT_EQ(ctrl->exit_info_1, error_code);
}
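
/*
 * L1 SVM flow: zero the L2 IDTR limit so that vectoring any exception in L2
 * faults on the IDT lookup, intercept SHUTDOWN, then step through
 * progressively smaller exception intercept sets (#SS/#GP/#DF => #SS/#DF =>
 * #SS only), and finally expect a SHUTDOWN #VMEXIT.
 */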
static void l1_svm_code(struct svm_test_data *svm)
{
	struct vmcb_control_area *ctrl = &svm->vmcb->control;
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	generic_svm_setup(svm, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	svm->vmcb->save.idtr.limit = 0;
	ctrl->intercept |= BIT_ULL(INTERCEPT_SHUTDOWN);

	ctrl->intercept_exceptions = INTERCEPT_SS_GP_DF;
	svm_run_l2(svm, l2_ss_pending_test, SS_VECTOR, SS_ERROR_CODE);
	svm_run_l2(svm, l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_AMD);

	ctrl->intercept_exceptions = INTERCEPT_SS_DF;
	svm_run_l2(svm, l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	ctrl->intercept_exceptions = INTERCEPT_SS;
	svm_run_l2(svm, l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(ctrl->exit_code, SVM_EXIT_SHUTDOWN);

	GUEST_DONE();
}
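
/*
 * Run L2 at the given code via VMLAUNCH (first entry) or VMRESUME.  Unless a
 * (fake) triple fault is expected, assert that the resulting VM-Exit is an
 * exception exit for "vector" with the expected error code.
 */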
static void vmx_run_l2(void *l2_code, int vector, uint32_t error_code)
{
	GUEST_ASSERT(!vmwrite(GUEST_RIP, (u64)l2_code));

	GUEST_ASSERT_EQ(vector == SS_VECTOR ? vmlaunch() : vmresume(), 0);

	if (vector == FAKE_TRIPLE_FAULT_VECTOR)
		return;

	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_EXCEPTION_NMI);
	GUEST_ASSERT_EQ((vmreadz(VM_EXIT_INTR_INFO) & 0xff), vector);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_INTR_ERROR_CODE), error_code);
}
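
/*
 * L1 VMX flow, mirroring l1_svm_code(): zero the L2 IDTR limit, then walk the
 * same progression of exception bitmaps and finally expect a TRIPLE_FAULT
 * VM-Exit.
 */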
static void l1_vmx_code(struct vmx_pages *vmx)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT_EQ(prepare_for_vmx_operation(vmx), true);

	GUEST_ASSERT_EQ(load_vmcs(vmx), true);

	prepare_vmcs(vmx, NULL, &l2_guest_stack[L2_GUEST_STACK_SIZE]);
	GUEST_ASSERT_EQ(vmwrite(GUEST_IDTR_LIMIT, 0), 0);

	/*
	 * VMX disallows injecting an exception with error_code[31:16] != 0,
	 * and hardware will never generate a VM-Exit with bits 31:16 set.
	 * KVM should likewise truncate the "bad" userspace value.
	 */
	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_GP_DF), 0);
	vmx_run_l2(l2_ss_pending_test, SS_VECTOR, (u16)SS_ERROR_CODE);
	vmx_run_l2(l2_ss_injected_gp_test, GP_VECTOR, GP_ERROR_CODE_INTEL);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS_DF), 0);
	vmx_run_l2(l2_ss_injected_df_test, DF_VECTOR, DF_ERROR_CODE);

	GUEST_ASSERT_EQ(vmwrite(EXCEPTION_BITMAP, INTERCEPT_SS), 0);
	vmx_run_l2(l2_ss_injected_tf_test, FAKE_TRIPLE_FAULT_VECTOR, 0);
	GUEST_ASSERT_EQ(vmreadz(VM_EXIT_REASON), EXIT_REASON_TRIPLE_FAULT);

	GUEST_DONE();
}
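
/* __flatten__ inlines the SVM/VMX helpers (where possible) into the L1 entry point. */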
static void __attribute__((__flatten__)) l1_guest_code(void *test_data)
{
	if (this_cpu_has(X86_FEATURE_SVM))
		l1_svm_code(test_data);
	else
		l1_vmx_code(test_data);
}
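
/*
 * Assert that the vCPU exited to host userspace with a ucall reporting the
 * expected vector, or reporting completion when vector == -1.
 */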
static void assert_ucall_vector(struct kvm_vcpu *vcpu, int vector)
{
	struct ucall uc;

	TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		TEST_ASSERT(vector == uc.args[1],
			    "Expected L2 to ask for %d, got %ld", vector, uc.args[1]);
		break;
	case UCALL_DONE:
		TEST_ASSERT(vector == -1,
			    "Expected L2 to ask for %d, L2 says it's done", vector);
		break;
	case UCALL_ABORT:
		REPORT_GUEST_ASSERT(uc);
		break;
	default:
		TEST_FAIL("Expected L2 to ask for %d, got unexpected ucall %lu", vector, uc.cmd);
	}
}
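
/*
 * Queue a #SS with SS_ERROR_CODE for the vCPU via KVM_SET_VCPU_EVENTS, as
 * either a still-pending or an already-injected exception, after verifying
 * that no other exception is pending/injected.
 */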
static void queue_ss_exception(struct kvm_vcpu *vcpu, bool inject)
{
	struct kvm_vcpu_events events;

	vcpu_events_get(vcpu, &events);

	TEST_ASSERT(!events.exception.pending,
		    "Vector %d unexpectedly pending", events.exception.nr);
	TEST_ASSERT(!events.exception.injected,
		    "Vector %d unexpectedly injected", events.exception.nr);

	events.flags = KVM_VCPUEVENT_VALID_PAYLOAD;
	events.exception.pending = !inject;
	events.exception.injected = inject;
	events.exception.nr = SS_VECTOR;
	events.exception.has_error_code = true;
	events.exception.error_code = SS_ERROR_CODE;
	vcpu_events_set(vcpu, &events);
}

/*
 * Verify KVM_{G,S}ET_EVENTS play nice with pending vs. injected exceptions
 * when an exception is being queued for L2.  Specifically, verify that KVM
 * honors L1 exception intercept controls when a #SS is pending/injected,
 * triggers a #GP on vectoring the #SS, morphs to #DF if #GP isn't intercepted
 * by L1, and finally causes (nested) SHUTDOWN if #DF isn't intercepted by L1.
 */
int main(int argc, char *argv[])
{
	vm_vaddr_t nested_test_data_gva;
	struct kvm_vcpu_events events;
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_EXCEPTION_PAYLOAD));
	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SVM) || kvm_cpu_has(X86_FEATURE_VMX));

	vm = vm_create_with_one_vcpu(&vcpu, l1_guest_code);
	vm_enable_cap(vm, KVM_CAP_EXCEPTION_PAYLOAD, -2ul);

	if (kvm_cpu_has(X86_FEATURE_SVM))
		vcpu_alloc_svm(vm, &nested_test_data_gva);
	else
		vcpu_alloc_vmx(vm, &nested_test_data_gva);

	vcpu_args_set(vcpu, 1, nested_test_data_gva);

	/* Run L1 => L2.  L2 should sync and request #SS. */
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, SS_VECTOR);

	/* Pend #SS and request immediate exit.  #SS should still be pending. */
	queue_ss_exception(vcpu, false);
	vcpu->run->immediate_exit = true;
	vcpu_run_complete_io(vcpu);

	/* Verify the pending event comes back out the same as it went in. */
	vcpu_events_get(vcpu, &events);
	TEST_ASSERT_EQ(events.flags & KVM_VCPUEVENT_VALID_PAYLOAD,
		       KVM_VCPUEVENT_VALID_PAYLOAD);
	TEST_ASSERT_EQ(events.exception.pending, true);
	TEST_ASSERT_EQ(events.exception.nr, SS_VECTOR);
	TEST_ASSERT_EQ(events.exception.has_error_code, true);
	TEST_ASSERT_EQ(events.exception.error_code, SS_ERROR_CODE);

	/*
	 * Run for real with the pending #SS, L1 should get a VM-Exit due to
	 * #SS interception and re-enter L2 to request #GP (via injected #SS).
	 */
	vcpu->run->immediate_exit = false;
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, GP_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 should intercept before KVM morphs it to #DF.  L1 should then
	 * disable #GP interception and run L2 to request #DF (via #SS => #GP).
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, DF_VECTOR);

	/*
	 * Inject #SS, the #SS should bypass interception and cause #GP, which
	 * L1 is no longer intercepting, and so should see a #DF VM-Exit.  L1
	 * should then signal that it's done.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, FAKE_TRIPLE_FAULT_VECTOR);

	/*
	 * Inject #SS yet again.  L1 is not intercepting #GP or #DF, and so
	 * should see nested TRIPLE_FAULT / SHUTDOWN.
	 */
	queue_ss_exception(vcpu, true);
	vcpu_run(vcpu);
	assert_ucall_vector(vcpu, -1);

	kvm_vm_free(vm);
}