1 // SPDX-License-Identifier: GPL-2.0
3 * Hyper-V HvFlushVirtualAddress{List,Space}{,Ex} tests
5 * Copyright (C) 2022, Red Hat, Inc.
8 #include <asm/barrier.h>
13 #include "processor.h"
15 #include "test_util.h"
/*
 * Worker vCPU ids: one below 64 and one above so the sparse vp_set
 * hypercalls have to populate two different 64-bit banks.
 */
#define WORKER_VCPU_ID_1 2
#define WORKER_VCPU_ID_2 65
/*
 * hv_vpset.format values (Hyper-V TLFS: HV_GENERIC_SET_FORMAT).
 * The visible enum was truncated: HV_GENERIC_SET_ALL is used by the
 * test loops below, so it must be declared here.
 */
enum HV_GENERIC_SET_FORMAT {
	HV_GENERIC_SET_SPARSE_4K,	/* sparse: valid_bank_mask + bank_contents[] follow */
	HV_GENERIC_SET_ALL,		/* all processors: no banks follow */
};
/* Flags for hv_tlb_flush{,_ex}::flags (Hyper-V TLFS flush hypercall input) */
#define HV_FLUSH_ALL_PROCESSORS BIT(0)
#define HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES BIT(1)
#define HV_FLUSH_NON_GLOBAL_MAPPINGS_ONLY BIT(2)
#define HV_FLUSH_USE_EXTENDED_RANGE_FORMAT BIT(3)
/* HvFlushVirtualAddressSpace, HvFlushVirtualAddressList hypercalls */
/*
 * NOTE(review): the struct hv_tlb_flush definition itself is not visible in
 * this chunk — confirm against the full file.
 */

/* HvFlushVirtualAddressSpaceEx, HvFlushVirtualAddressListEx hypercalls */
struct hv_tlb_flush_ex {
	/*
	 * NOTE(review): leading members (flags/reserved) are not visible in
	 * this chunk; only hv_vp_set is shown — confirm against the full file.
	 */
	struct hv_vpset hv_vp_set;

/*
 * Pass the following info to 'workers' and 'sender'
 * - Hypercall page's GVA
 * - Hypercall page's GPA
 * - GVAs of the test pages' PTEs
 */
	/* NOTE(review): 'struct test_data {' opener and hcall members not visible here. */
	vm_vaddr_t test_pages;			/* GVA of the NTEST_PAGES test pages (+ expectation page) */
	vm_vaddr_t test_pages_pte[NTEST_PAGES];	/* GVAs through which each test page's PTE can be written */
/* 'Worker' vCPU code checking the contents of the test page */
/*
 * Runs forever: reads the per-vCPU expectation slot (a u64 at
 * test_pages + NTEST_PAGES * PAGE_SIZE + vcpu_id * 8, written by the
 * sender via set_expected_val()), then reads the first test page and
 * asserts it matches — but only when the expectation is non-zero and
 * unchanged across the read, i.e. the sender is not mid-iteration.
 *
 * NOTE(review): the enclosing for(;;) loop, the local declarations of
 * 'expected'/'val', the rmb() barriers and the 'continue' paths are not
 * visible in this chunk — confirm against the full file.
 */
static void worker_guest_code(vm_vaddr_t test_data)
	struct test_data *data = (struct test_data *)test_data;
	u32 vcpu_id = rdmsr(HV_X64_MSR_VP_INDEX);
	void *exp_page = (void *)data->test_pages + PAGE_SIZE * NTEST_PAGES;
	u64 *this_cpu = (u64 *)(exp_page + vcpu_id * sizeof(u64));

	/* Hyper-V MSRs require the guest OS id to be set first. */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);

		expected = READ_ONCE(*this_cpu);

		/*
		 * Make sure the value in the test page is read after reading
		 * the expectation for the first time. Pairs with wmb() in
		 * the sender's prepare_to_test()/post_test() paths — TODO(review): confirm.
		 */
		val = READ_ONCE(*(u64 *)data->test_pages);

		/*
		 * Make sure the value in the test page is read before
		 * reading the expectation for the second time. Pairs with wmb()
		 * in the sender — TODO(review): confirm.
		 */

		/*
		 * '0' indicates the sender is between iterations, wait until
		 * the sender is ready for this vCPU to start checking again.
		 */

		/*
		 * Re-read the per-vCPU byte to ensure the sender didn't move
		 * onto a new iteration.
		 */
		if (expected != READ_ONCE(*this_cpu))

		GUEST_ASSERT(val == expected);
122 * Write per-CPU info indicating what each 'worker' CPU is supposed to see in
123 * test page. '0' means don't check.
125 static void set_expected_val(void *addr
, u64 val
, int vcpu_id
)
127 void *exp_page
= addr
+ PAGE_SIZE
* NTEST_PAGES
;
129 *(u64
*)(exp_page
+ vcpu_id
* sizeof(u64
)) = val
;
133 * Update PTEs swapping two test pages.
134 * TODO: use swap()/xchg() when these are provided.
136 static void swap_two_test_pages(vm_paddr_t pte_gva1
, vm_paddr_t pte_gva2
)
138 uint64_t tmp
= *(uint64_t *)pte_gva1
;
140 *(uint64_t *)pte_gva1
= *(uint64_t *)pte_gva2
;
141 *(uint64_t *)pte_gva2
= tmp
;
/*
 * TODO: replace the silly NOP loop with a proper udelay() implementation.
 */
/* Busy-wait long enough for worker vCPUs to observe expectation changes. */
static inline void do_delay(void)
	/* NOTE(review): declaration of 'i' and the loop body are not visible in this chunk. */
	for (i = 0; i < 1000000; i++)
/*
 * Prepare to test: 'disable' workers by setting the expectation to '0',
 * clear hypercall input page and then swap two test pages.
 */
static inline void prepare_to_test(struct test_data *data)
	/* Clear hypercall input page */
	memset((void *)data->hcall_gva, 0, PAGE_SIZE);

	/* 'Disable' workers */
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, 0x0, WORKER_VCPU_ID_2);

	/* Make sure workers are 'disabled' before we swap PTEs. */
	/* NOTE(review): the barrier call itself is not visible in this chunk. */

	/* Make sure workers have enough time to notice */
	/* NOTE(review): the delay call itself is not visible in this chunk. */

	/* Swap test page mappings */
	swap_two_test_pages(data->test_pages_pte[0], data->test_pages_pte[1]);
/*
 * Finalize the test: check hypercall result, set the expected val for
 * 'worker' CPUs and give them some time to test.
 */
static inline void post_test(struct test_data *data, u64 exp1, u64 exp2)
	/* Make sure we change the expectation after swapping PTEs */
	/* NOTE(review): the barrier call itself is not visible in this chunk. */

	/* Set the expectation for workers, '0' means don't test */
	set_expected_val((void *)data->test_pages, exp1, WORKER_VCPU_ID_1);
	set_expected_val((void *)data->test_pages, exp2, WORKER_VCPU_ID_2);

	/* Make sure workers have enough time to test */
/* Alternating per-iteration fill patterns the workers must observe */
#define TESTVAL1 0x0101010101010101
#define TESTVAL2 0x0202020202020202
/* Main vCPU doing the test */
/*
 * Exercises HvFlushVirtualAddress{Space,List}{,Ex} in both memory-based
 * ("slow") and register/XMM-based ("fast") input form.  Each variant runs
 * NTRY iterations: prepare_to_test() swaps the two test pages' PTEs, the
 * flush hypercall is issued, and post_test() publishes the value the
 * targeted worker vCPUs must then observe (TESTVAL1/TESTVAL2 alternating
 * per iteration; '0' for workers that are not targeted and must not check).
 *
 * NOTE(review): the GUEST_SYNC() stage markers between loops, loop-closing
 * braces and the local declarations (e.g. 'i') are not visible in this
 * chunk — confirm stage numbering against main().
 */
static void sender_guest_code(vm_vaddr_t test_data)
	struct test_data *data = (struct test_data *)test_data;
	/* flush and flush_ex alias the same hypercall input page */
	struct hv_tlb_flush *flush = (struct hv_tlb_flush *)data->hcall_gva;
	struct hv_tlb_flush_ex *flush_ex = (struct hv_tlb_flush_ex *)data->hcall_gva;
	vm_paddr_t hcall_gpa = data->hcall_gpa;

	/* Enable hypercalls: set guest OS id, then register the hypercall page. */
	wrmsr(HV_X64_MSR_GUEST_OS_ID, HYPERV_LINUX_OS_ID);
	wrmsr(HV_X64_MSR_HYPERCALL, data->hcall_gpa);

	/* "Slow" hypercalls */

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->processor_mask = 0;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE, hcall_gpa,
				 hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
			HV_FLUSH_ALL_PROCESSORS;
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		/* Worker ids straddle 64, so two banks are marked valid */
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX,
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 hcall_gpa, hcall_gpa + PAGE_SIZE);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* "Fast" hypercalls */

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		/* Fast form: input passed through XMM registers, flags in the GPR */
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for WORKER_VCPU_ID_1 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->processor_mask = BIT(WORKER_VCPU_ID_1);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2, 0x0);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE |
				 HV_HYPERCALL_FAST_BIT, 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST for HV_FLUSH_ALL_PROCESSORS */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush->processor_mask, 1);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET), 0x0,
				 HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES |
				 HV_FLUSH_ALL_PROCESSORS);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for WORKER_VCPU_ID_2 */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [1] */
		flush_ex->gva_list[1] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, 0x0, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_2 / 64) |
			BIT_ULL(WORKER_VCPU_ID_1 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 :
			  TESTVAL2, i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for both vCPUs */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_SPARSE_4K;
		flush_ex->hv_vp_set.valid_bank_mask = BIT_ULL(WORKER_VCPU_ID_1 / 64) |
			BIT_ULL(WORKER_VCPU_ID_2 / 64);
		flush_ex->hv_vp_set.bank_contents[0] = BIT_ULL(WORKER_VCPU_ID_1 % 64);
		flush_ex->hv_vp_set.bank_contents[1] = BIT_ULL(WORKER_VCPU_ID_2 % 64);
		/* bank_contents and gva_list occupy the same space, thus [2] */
		flush_ex->gva_list[2] = (u64)data->test_pages;
		/* 3 XMM halves: vp_set header + two banks + the rep gva entry */
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 3);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (2 << HV_HYPERCALL_VARHEAD_OFFSET) |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_SPACE_EX |
				 HV_HYPERCALL_FAST_BIT,
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);

	/* HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX for HV_GENERIC_SET_ALL */
	for (i = 0; i < NTRY; i++) {
		prepare_to_test(data);
		flush_ex->flags = HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES;
		flush_ex->hv_vp_set.format = HV_GENERIC_SET_ALL;
		flush_ex->gva_list[0] = (u64)data->test_pages;
		hyperv_write_xmm_input(&flush_ex->hv_vp_set, 2);
		hyperv_hypercall(HVCALL_FLUSH_VIRTUAL_ADDRESS_LIST_EX |
				 HV_HYPERCALL_FAST_BIT |
				 (1UL << HV_HYPERCALL_REP_COMP_OFFSET),
				 0x0, HV_FLUSH_ALL_VIRTUAL_ADDRESS_SPACES);
		post_test(data, i % 2 ? TESTVAL1 : TESTVAL2,
			  i % 2 ? TESTVAL1 : TESTVAL2);
/*
 * Worker-vCPU runner thread: runs the vCPU and reports guest asserts.
 * The main thread tears it down via cancel_join_vcpu_thread(); hence the
 * asynchronous cancel type below.
 *
 * NOTE(review): the vcpu_run() loop skeleton and the declarations of
 * 'r', 'old' and 'uc' are not visible in this chunk.
 */
static void *vcpu_thread(void *arg)
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)arg;

	/* Allow cancellation at any point, not only at cancellation points */
	r = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
	TEST_ASSERT(!r, "pthread_setcanceltype failed on vcpu_id=%u with errno=%d",

		TEST_ASSERT_KVM_EXIT_REASON(vcpu, KVM_EXIT_IO);

		switch (get_ucall(vcpu, &uc)) {
			/* Propagate a GUEST_ASSERT() failure from the worker */
			REPORT_GUEST_ASSERT(uc);
			TEST_FAIL("Unexpected ucall %lu, vCPU %d", uc.cmd, vcpu->id);
/*
 * Cancel a worker thread and reap it, asserting that it terminated via
 * cancellation (retval == PTHREAD_CANCELED) rather than by returning.
 *
 * NOTE(review): the declarations of 'r'/'retval' and the trailing assert
 * message arguments are not visible in this chunk.
 */
static void cancel_join_vcpu_thread(pthread_t thread, struct kvm_vcpu *vcpu)
	r = pthread_cancel(thread);
	TEST_ASSERT(!r, "pthread_cancel on vcpu_id=%d failed with errno=%d",

	r = pthread_join(thread, &retval);
	TEST_ASSERT(!r, "pthread_join on vcpu_id=%d failed with errno=%d",

	TEST_ASSERT(retval == PTHREAD_CANCELED,
		    "expected retval=%p, got %p", PTHREAD_CANCELED,
/*
 * Test setup: one sender vCPU (vcpu[0]) running sender_guest_code() plus two
 * worker vCPUs on dedicated pthreads; the main thread then pumps the sender's
 * GUEST_SYNC stages until completion and cancels the workers.
 *
 * NOTE(review): several local declarations (vm, uc, stage, r, i, pte, gpa),
 * loop-closing braces and the sender's vcpu_run() loop skeleton are not
 * visible in this chunk.
 */
int main(int argc, char *argv[])
	struct kvm_vcpu *vcpu[3];	/* [0] sender, [1]/[2] workers */
	pthread_t threads[2];
	vm_vaddr_t test_data_page, gva;
	struct test_data *data;

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_HYPERV_TLBFLUSH));

	vm = vm_create_with_one_vcpu(&vcpu[0], sender_guest_code);

	/* Shared test_data visible to guest and host */
	test_data_page = vm_vaddr_alloc_page(vm);
	data = (struct test_data *)addr_gva2hva(vm, test_data_page);

	/* Hypercall input/output */
	data->hcall_gva = vm_vaddr_alloc_pages(vm, 2);
	data->hcall_gpa = addr_gva2gpa(vm, data->hcall_gva);
	memset(addr_gva2hva(vm, data->hcall_gva), 0x0, 2 * PAGE_SIZE);

	/*
	 * Test pages: the first one is filled with '0x01's, the second with '0x02's
	 * and the test will swap their mappings. The third page keeps the indication
	 * about the current state of mappings.
	 */
	data->test_pages = vm_vaddr_alloc_pages(vm, NTEST_PAGES + 1);
	for (i = 0; i < NTEST_PAGES; i++)
		memset(addr_gva2hva(vm, data->test_pages + PAGE_SIZE * i),
		       (u8)(i + 1), PAGE_SIZE);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_1);
	set_expected_val(addr_gva2hva(vm, data->test_pages), 0x0, WORKER_VCPU_ID_2);

	/*
	 * Get PTE pointers for test pages and map them inside the guest.
	 * Use separate page for each PTE for simplicity.
	 */
	gva = vm_vaddr_unused_gap(vm, NTEST_PAGES * PAGE_SIZE, KVM_UTIL_MIN_VADDR);
	for (i = 0; i < NTEST_PAGES; i++) {
		pte = vm_get_page_table_entry(vm, data->test_pages + i * PAGE_SIZE);
		gpa = addr_hva2gpa(vm, pte);
		__virt_pg_map(vm, gva + PAGE_SIZE * i, gpa & PAGE_MASK, PG_LEVEL_4K);
		/* GVA through which the guest can rewrite test page i's PTE */
		data->test_pages_pte[i] = gva + (gpa & ~PAGE_MASK);

	/*
	 * Sender vCPU which performs the test: swaps test pages, sets expectation
	 * for 'workers' and issues TLB flush hypercalls.
	 */
	vcpu_args_set(vcpu[0], 1, test_data_page);
	vcpu_set_hv_cpuid(vcpu[0]);

	/* Create worker vCPUs which check the contents of the test pages */
	vcpu[1] = vm_vcpu_add(vm, WORKER_VCPU_ID_1, worker_guest_code);
	vcpu_args_set(vcpu[1], 1, test_data_page);
	vcpu_set_msr(vcpu[1], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_1);
	vcpu_set_hv_cpuid(vcpu[1]);

	vcpu[2] = vm_vcpu_add(vm, WORKER_VCPU_ID_2, worker_guest_code);
	vcpu_args_set(vcpu[2], 1, test_data_page);
	vcpu_set_msr(vcpu[2], HV_X64_MSR_VP_INDEX, WORKER_VCPU_ID_2);
	vcpu_set_hv_cpuid(vcpu[2]);

	r = pthread_create(&threads[0], NULL, vcpu_thread, vcpu[1]);
	TEST_ASSERT(!r, "pthread_create() failed");

	r = pthread_create(&threads[1], NULL, vcpu_thread, vcpu[2]);
	TEST_ASSERT(!r, "pthread_create() failed");

		TEST_ASSERT_KVM_EXIT_REASON(vcpu[0], KVM_EXIT_IO);

		switch (get_ucall(vcpu[0], &uc)) {
			/* Sender's GUEST_SYNC() must arrive in order */
			TEST_ASSERT(uc.args[1] == stage,
				    "Unexpected stage: %ld (%d expected)",
			REPORT_GUEST_ASSERT(uc);
			TEST_FAIL("Unknown ucall %lu", uc.cmd);

	/* Tear down the worker threads (they never exit on their own) */
	cancel_join_vcpu_thread(threads[0], vcpu[1]);
	cancel_join_vcpu_thread(threads[1], vcpu[2]);