// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kvm.h>
#include <linux/psp-sev.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include <errno.h>
#include <pthread.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "sev.h"
#include "kselftest.h"

#define NR_MIGRATE_TEST_VCPUS 4
#define NR_MIGRATE_TEST_VMS 3
#define NR_LOCK_TESTING_THREADS 3
#define NR_LOCK_TESTING_ITERATIONS 10000

bool have_sev_es;
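/*
 * Create a barebones SEV or SEV-ES guest with NR_MIGRATE_TEST_VCPUS vCPUs and
 * run it through LAUNCH_START (plus LAUNCH_UPDATE_VMSA for SEV-ES) so that it
 * carries an encryption context that can be migrated or mirrored.
 */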
static struct kvm_vm *sev_vm_create(bool es)
{
	struct kvm_vm *vm;
	int i;

	vm = vm_create_barebones();
	if (!es)
		sev_vm_init(vm);
	else
		sev_es_vm_init(vm);

	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		__vm_vcpu_add(vm, i);

	sev_vm_launch(vm, es ? SEV_POLICY_ES : 0);

	if (es)
		vm_sev_ioctl(vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);
	return vm;
}
static struct kvm_vm *aux_vm_create(bool with_vcpus)
{
	struct kvm_vm *vm;
	int i;

	vm = vm_create_barebones();
	if (!with_vcpus)
		return vm;

	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		__vm_vcpu_add(vm, i);

	return vm;
}
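/*
 * Intra-host "migration" moves the source VM's SEV encryption context to the
 * destination by enabling KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM on the destination
 * with the source VM's fd as the capability argument.
 */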
static int __sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
	return __vm_enable_cap(dst, KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM, src->fd);
}
static void sev_migrate_from(struct kvm_vm *dst, struct kvm_vm *src)
{
	int ret;

	ret = __sev_migrate_from(dst, src);
	TEST_ASSERT(!ret, "Migration failed, ret: %d, errno: %d", ret, errno);
}
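/*
 * Migrate a guest from its source VM through a chain of destination VMs, then
 * verify that migrating back out of a dead (already migrated-from) VM fails
 * with EIO.
 */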
static void test_sev_migrate_from(bool es)
{
	struct kvm_vm *src_vm;
	struct kvm_vm *dst_vms[NR_MIGRATE_TEST_VMS];
	int i, ret;

	src_vm = sev_vm_create(es);
	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		dst_vms[i] = aux_vm_create(true);

	/* Initial migration from the src to the first dst. */
	sev_migrate_from(dst_vms[0], src_vm);

	for (i = 1; i < NR_MIGRATE_TEST_VMS; i++)
		sev_migrate_from(dst_vms[i], dst_vms[i - 1]);

	/* Migrate the guest back to the original VM. */
	ret = __sev_migrate_from(src_vm, dst_vms[NR_MIGRATE_TEST_VMS - 1]);
	TEST_ASSERT(ret == -1 && errno == EIO,
		    "VM that was migrated from should be dead. ret %d, errno: %d", ret,
		    errno);

	kvm_vm_free(src_vm);
	for (i = 0; i < NR_MIGRATE_TEST_VMS; ++i)
		kvm_vm_free(dst_vms[i]);
}
struct locking_thread_input {
	struct kvm_vm *vm;
	struct kvm_vm *source_vms[NR_LOCK_TESTING_THREADS];
};
static void *locking_test_thread(void *arg)
{
	int i, j;
	struct locking_thread_input *input = (struct locking_thread_input *)arg;

	for (i = 0; i < NR_LOCK_TESTING_ITERATIONS; ++i) {
		j = i % NR_LOCK_TESTING_THREADS;
		__sev_migrate_from(input->vm, input->source_vms[j]);
	}

	return NULL;
}
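/*
 * Hammer migrations between a set of VMs from multiple threads to stress the
 * locking around KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM.  The individual return
 * values are intentionally ignored; the test only checks that nothing hangs
 * or crashes.
 */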
static void test_sev_migrate_locking(void)
{
	struct locking_thread_input input[NR_LOCK_TESTING_THREADS];
	pthread_t pt[NR_LOCK_TESTING_THREADS];
	int i;

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i) {
		input[i].vm = sev_vm_create(/* es= */ false);
		input[0].source_vms[i] = input[i].vm;
	}
	for (i = 1; i < NR_LOCK_TESTING_THREADS; ++i)
		memcpy(input[i].source_vms, input[0].source_vms,
		       sizeof(input[i].source_vms));

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		pthread_create(&pt[i], NULL, locking_test_thread, &input[i]);

	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		pthread_join(pt[i], NULL);
	for (i = 0; i < NR_LOCK_TESTING_THREADS; ++i)
		kvm_vm_free(input[i].vm);
}
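/*
 * Verify that invalid migration requests (no SEV on the source, mismatched
 * SEV/SEV-ES types, mismatched vCPU counts, missing LAUNCH_UPDATE_VMSA) are
 * rejected with EINVAL.
 */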
static void test_sev_migrate_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_no_sev,
		*sev_es_vm_no_vmsa;
	int ret;

	vm_no_vcpu = vm_create_barebones();
	vm_no_sev = aux_vm_create(true);
	ret = __sev_migrate_from(vm_no_vcpu, vm_no_sev);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Migrations require SEV enabled. ret %d, errno: %d", ret,
		    errno);

	if (!have_sev_es)
		goto out;

	sev_vm = sev_vm_create(/* es= */ false);
	sev_es_vm = sev_vm_create(/* es= */ true);
	sev_es_vm_no_vmsa = vm_create_barebones();
	sev_es_vm_init(sev_es_vm_no_vmsa);
	__vm_vcpu_add(sev_es_vm_no_vmsa, 1);

	ret = __sev_migrate_from(sev_vm, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV enabled VM. ret: %d, errno: %d",
		ret, errno);

	ret = __sev_migrate_from(sev_es_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able migrate to SEV-ES enabled VM. ret: %d, errno: %d",
		ret, errno);

	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require same number of vCPUS. ret: %d, errno: %d",
		ret, errno);

	ret = __sev_migrate_from(vm_no_vcpu, sev_es_vm_no_vmsa);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV-ES migrations require UPDATE_VMSA. ret %d, errno: %d",
		ret, errno);

	kvm_vm_free(sev_vm);
	kvm_vm_free(sev_es_vm);
	kvm_vm_free(sev_es_vm_no_vmsa);
out:
	kvm_vm_free(vm_no_vcpu);
	kvm_vm_free(vm_no_sev);
}
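/*
 * "Mirroring" copies (rather than moves) the source VM's encryption context
 * to the destination via KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, again passing the
 * source VM's fd as the capability argument.
 */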
static int __sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
	return __vm_enable_cap(dst, KVM_CAP_VM_COPY_ENC_CONTEXT_FROM, src->fd);
}
static void sev_mirror_create(struct kvm_vm *dst, struct kvm_vm *src)
{
	int ret;

	ret = __sev_mirror_create(dst, src);
	TEST_ASSERT(!ret, "Copying context failed, ret: %d, errno: %d", ret, errno);
}
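/*
 * Mirror VMs may only issue the small set of SEV commands needed to finish
 * setting up their vCPUs; every other command must fail with EINVAL.
 */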
static void verify_mirror_allowed_cmds(struct kvm_vm *vm)
{
	struct kvm_sev_guest_status status;
	int cmd_id;

	for (cmd_id = KVM_SEV_INIT; cmd_id < KVM_SEV_NR_MAX; ++cmd_id) {
		int ret;

		/*
		 * These commands are allowed for mirror VMs, all others are
		 * not.
		 */
		switch (cmd_id) {
		case KVM_SEV_LAUNCH_UPDATE_VMSA:
		case KVM_SEV_GUEST_STATUS:
		case KVM_SEV_DBG_DECRYPT:
		case KVM_SEV_DBG_ENCRYPT:
			continue;
		default:
			break;
		}

		/*
		 * These commands should be disallowed before the data
		 * parameter is examined so NULL is OK here.
		 */
		ret = __vm_sev_ioctl(vm, cmd_id, NULL);
		TEST_ASSERT(
			ret == -1 && errno == EINVAL,
			"Should not be able call command: %d. ret: %d, errno: %d",
			cmd_id, ret, errno);
	}

	vm_sev_ioctl(vm, KVM_SEV_GUEST_STATUS, &status);
}
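/*
 * Create a mirror of an SEV/SEV-ES VM, finish creating its vCPUs, and check
 * which SEV commands the mirror is allowed to issue.
 */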
static void test_sev_mirror(bool es)
{
	struct kvm_vm *src_vm, *dst_vm;
	int i;

	src_vm = sev_vm_create(es);
	dst_vm = aux_vm_create(false);

	sev_mirror_create(dst_vm, src_vm);

	/* Check that we can complete creation of the mirror VM. */
	for (i = 0; i < NR_MIGRATE_TEST_VCPUS; ++i)
		__vm_vcpu_add(dst_vm, i);

	if (es)
		vm_sev_ioctl(dst_vm, KVM_SEV_LAUNCH_UPDATE_VMSA, NULL);

	verify_mirror_allowed_cmds(dst_vm);

	kvm_vm_free(src_vm);
	kvm_vm_free(dst_vm);
}
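/*
 * Verify that invalid mirror requests (self-mirroring, no SEV on the source,
 * vCPUs already present on the destination, SEV/SEV-ES destinations) are
 * rejected with EINVAL.
 */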
static void test_sev_mirror_parameters(void)
{
	struct kvm_vm *sev_vm, *sev_es_vm, *vm_no_vcpu, *vm_with_vcpu;
	int ret;

	sev_vm = sev_vm_create(/* es= */ false);
	vm_with_vcpu = aux_vm_create(true);
	vm_no_vcpu = aux_vm_create(false);

	ret = __sev_mirror_create(sev_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to self. ret: %d, errno: %d",
		ret, errno);

	ret = __sev_mirror_create(vm_no_vcpu, vm_with_vcpu);
	TEST_ASSERT(ret == -1 && errno == EINVAL,
		    "Copy context requires SEV enabled. ret %d, errno: %d", ret,
		    errno);

	ret = __sev_mirror_create(vm_with_vcpu, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"SEV copy context requires no vCPUS on the destination. ret: %d, errno: %d",
		ret, errno);

	if (!have_sev_es)
		goto out;

	sev_es_vm = sev_vm_create(/* es= */ true);
	ret = __sev_mirror_create(sev_vm, sev_es_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV enabled VM. ret: %d, errno: %d",
		ret, errno);

	ret = __sev_mirror_create(sev_es_vm, sev_vm);
	TEST_ASSERT(
		ret == -1 && errno == EINVAL,
		"Should not be able copy context to SEV-ES enabled VM. ret: %d, errno: %d",
		ret, errno);

	kvm_vm_free(sev_es_vm);

out:
	kvm_vm_free(sev_vm);
	kvm_vm_free(vm_with_vcpu);
	kvm_vm_free(vm_no_vcpu);
}
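/*
 * Interleave mirroring and migration across several VMs, then repeat with the
 * mirrors destroyed before the VMs they mirror, to check that teardown
 * ordering is handled safely.
 */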
static void test_sev_move_copy(void)
{
	struct kvm_vm *dst_vm, *dst2_vm, *dst3_vm, *sev_vm, *mirror_vm,
		      *dst_mirror_vm, *dst2_mirror_vm, *dst3_mirror_vm;

	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	dst2_vm = aux_vm_create(true);
	dst3_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);
	dst2_mirror_vm = aux_vm_create(false);
	dst3_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	sev_migrate_from(dst2_vm, dst_vm);
	sev_migrate_from(dst2_mirror_vm, dst_mirror_vm);

	sev_migrate_from(dst3_mirror_vm, dst2_mirror_vm);
	sev_migrate_from(dst3_vm, dst2_vm);

	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
	kvm_vm_free(dst2_vm);
	kvm_vm_free(dst3_vm);
	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst2_mirror_vm);
	kvm_vm_free(dst3_mirror_vm);

	/*
	 * Run a similar test, but destroy the mirrors before the mirrored VMs,
	 * to ensure destruction is done safely.
	 */
	sev_vm = sev_vm_create(/* es= */ false);
	dst_vm = aux_vm_create(true);
	mirror_vm = aux_vm_create(false);
	dst_mirror_vm = aux_vm_create(false);

	sev_mirror_create(mirror_vm, sev_vm);

	sev_migrate_from(dst_mirror_vm, mirror_vm);
	sev_migrate_from(dst_vm, sev_vm);

	kvm_vm_free(mirror_vm);
	kvm_vm_free(dst_mirror_vm);
	kvm_vm_free(dst_vm);
	kvm_vm_free(sev_vm);
}
int main(int argc, char *argv[])
{
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM));
	TEST_REQUIRE(kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM));

	TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_SEV));

	have_sev_es = kvm_cpu_has(X86_FEATURE_SEV_ES);

	if (kvm_has_cap(KVM_CAP_VM_MOVE_ENC_CONTEXT_FROM)) {
		test_sev_migrate_from(/* es= */ false);
		if (have_sev_es)
			test_sev_migrate_from(/* es= */ true);
		test_sev_migrate_locking();
		test_sev_migrate_parameters();
		if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM))
			test_sev_move_copy();
	}
	if (kvm_has_cap(KVM_CAP_VM_COPY_ENC_CONTEXT_FROM)) {
		test_sev_mirror(/* es= */ false);
		if (have_sev_es)
			test_sev_mirror(/* es= */ true);
		test_sev_mirror_parameters();
	}

	return 0;
}