1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
7 #include "perf_test_util.h"
10 struct perf_test_args perf_test_args
;
12 uint64_t guest_test_phys_mem
;
15 * Guest virtual memory offset of the testing memory slot.
16 * Must not conflict with identity mapped test code.
18 static uint64_t guest_test_virt_mem
= DEFAULT_GUEST_TEST_MEM
;
21 * Continuously write to the first 8 bytes of each page in the
24 static void guest_code(uint32_t vcpu_id
)
26 struct perf_test_vcpu_args
*vcpu_args
= &perf_test_args
.vcpu_args
[vcpu_id
];
31 /* Make sure vCPU args data structure is not corrupt. */
32 GUEST_ASSERT(vcpu_args
->vcpu_id
== vcpu_id
);
35 pages
= vcpu_args
->pages
;
38 for (i
= 0; i
< pages
; i
++) {
39 uint64_t addr
= gva
+ (i
* perf_test_args
.guest_page_size
);
41 if (i
% perf_test_args
.wr_fract
== 0)
42 *(uint64_t *)addr
= 0x0123456789ABCDEF;
44 READ_ONCE(*(uint64_t *)addr
);
51 struct kvm_vm
*perf_test_create_vm(enum vm_guest_mode mode
, int vcpus
,
52 uint64_t vcpu_memory_bytes
)
55 uint64_t guest_num_pages
;
57 pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode
));
59 perf_test_args
.host_page_size
= getpagesize();
60 perf_test_args
.guest_page_size
= vm_guest_mode_params
[mode
].page_size
;
62 guest_num_pages
= vm_adjust_num_guest_pages(mode
,
63 (vcpus
* vcpu_memory_bytes
) / perf_test_args
.guest_page_size
);
65 TEST_ASSERT(vcpu_memory_bytes
% perf_test_args
.host_page_size
== 0,
66 "Guest memory size is not host page size aligned.");
67 TEST_ASSERT(vcpu_memory_bytes
% perf_test_args
.guest_page_size
== 0,
68 "Guest memory size is not guest page size aligned.");
70 vm
= vm_create_with_vcpus(mode
, vcpus
,
71 (vcpus
* vcpu_memory_bytes
) / perf_test_args
.guest_page_size
,
74 perf_test_args
.vm
= vm
;
77 * If there should be more memory in the guest test region than there
78 * can be pages in the guest, it will definitely cause problems.
80 TEST_ASSERT(guest_num_pages
< vm_get_max_gfn(vm
),
81 "Requested more guest memory than address space allows.\n"
82 " guest pages: %lx max gfn: %x vcpus: %d wss: %lx]\n",
83 guest_num_pages
, vm_get_max_gfn(vm
), vcpus
,
86 guest_test_phys_mem
= (vm_get_max_gfn(vm
) - guest_num_pages
) *
87 perf_test_args
.guest_page_size
;
88 guest_test_phys_mem
&= ~(perf_test_args
.host_page_size
- 1);
90 /* Align to 1M (segment size) */
91 guest_test_phys_mem
&= ~((1 << 20) - 1);
93 pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem
);
95 /* Add an extra memory slot for testing */
96 vm_userspace_mem_region_add(vm
, VM_MEM_SRC_ANONYMOUS
,
98 PERF_TEST_MEM_SLOT_INDEX
,
101 /* Do mapping for the demand paging memory slot */
102 virt_map(vm
, guest_test_virt_mem
, guest_test_phys_mem
, guest_num_pages
, 0);
104 ucall_init(vm
, NULL
);
/*
 * Tear down a VM created by perf_test_create_vm(): uninitialize the ucall
 * mechanism and free the VM.
 *
 * NOTE(review): the function body was entirely lost in extraction;
 * reconstructed from upstream perf_test_util.c — confirm.
 */
void perf_test_destroy_vm(struct kvm_vm *vm)
{
	ucall_uninit(vm);
	kvm_vm_free(vm);
}
115 void perf_test_setup_vcpus(struct kvm_vm
*vm
, int vcpus
, uint64_t vcpu_memory_bytes
)
118 struct perf_test_vcpu_args
*vcpu_args
;
121 for (vcpu_id
= 0; vcpu_id
< vcpus
; vcpu_id
++) {
122 vcpu_args
= &perf_test_args
.vcpu_args
[vcpu_id
];
124 vcpu_args
->vcpu_id
= vcpu_id
;
125 vcpu_args
->gva
= guest_test_virt_mem
+
126 (vcpu_id
* vcpu_memory_bytes
);
127 vcpu_args
->pages
= vcpu_memory_bytes
/
128 perf_test_args
.guest_page_size
;
130 vcpu_gpa
= guest_test_phys_mem
+ (vcpu_id
* vcpu_memory_bytes
);
131 pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
132 vcpu_id
, vcpu_gpa
, vcpu_gpa
+ vcpu_memory_bytes
);