// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */
#include <linux/bitmap.h>
#include <linux/bitops.h>

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
17 /* The memory slot index to track dirty pages */
18 #define TEST_MEM_SLOT_INDEX 1
19 #define TEST_MEM_PAGES 3
21 /* L1 guest test virtual memory offset */
22 #define GUEST_TEST_MEM 0xc0000000
24 /* L2 guest test virtual memory offset */
25 #define NESTED_TEST_MEM1 0xc0001000
26 #define NESTED_TEST_MEM2 0xc0002000
28 static void l2_guest_code(u64
*a
, u64
*b
)
41 /* Exit to L1 and never come back. */
45 static void l2_guest_code_ept_enabled(void)
47 l2_guest_code((u64
*)NESTED_TEST_MEM1
, (u64
*)NESTED_TEST_MEM2
);
50 static void l2_guest_code_ept_disabled(void)
52 /* Access the same L1 GPAs as l2_guest_code_ept_enabled() */
53 l2_guest_code((u64
*)GUEST_TEST_MEM
, (u64
*)GUEST_TEST_MEM
);
56 void l1_guest_code(struct vmx_pages
*vmx
)
58 #define L2_GUEST_STACK_SIZE 64
59 unsigned long l2_guest_stack
[L2_GUEST_STACK_SIZE
];
62 GUEST_ASSERT(vmx
->vmcs_gpa
);
63 GUEST_ASSERT(prepare_for_vmx_operation(vmx
));
64 GUEST_ASSERT(load_vmcs(vmx
));
67 l2_rip
= l2_guest_code_ept_enabled
;
69 l2_rip
= l2_guest_code_ept_disabled
;
71 prepare_vmcs(vmx
, l2_rip
, &l2_guest_stack
[L2_GUEST_STACK_SIZE
]);
74 GUEST_ASSERT(!vmlaunch());
76 GUEST_ASSERT(vmreadz(VM_EXIT_REASON
) == EXIT_REASON_VMCALL
);
80 static void test_vmx_dirty_log(bool enable_ept
)
82 vm_vaddr_t vmx_pages_gva
= 0;
83 struct vmx_pages
*vmx
;
85 uint64_t *host_test_mem
;
87 struct kvm_vcpu
*vcpu
;
92 pr_info("Nested EPT: %s\n", enable_ept
? "enabled" : "disabled");
95 vm
= vm_create_with_one_vcpu(&vcpu
, l1_guest_code
);
96 vmx
= vcpu_alloc_vmx(vm
, &vmx_pages_gva
);
97 vcpu_args_set(vcpu
, 1, vmx_pages_gva
);
99 /* Add an extra memory slot for testing dirty logging */
100 vm_userspace_mem_region_add(vm
, VM_MEM_SRC_ANONYMOUS
,
104 KVM_MEM_LOG_DIRTY_PAGES
);
107 * Add an identity map for GVA range [0xc0000000, 0xc0002000). This
108 * affects both L1 and L2. However...
110 virt_map(vm
, GUEST_TEST_MEM
, GUEST_TEST_MEM
, TEST_MEM_PAGES
);
113 * ... pages in the L2 GPA range [0xc0001000, 0xc0003000) will map to
116 * Note that prepare_eptp should be called only L1's GPA map is done,
117 * meaning after the last call to virt_map.
119 * When EPT is disabled, the L2 guest code will still access the same L1
120 * GPAs as the EPT enabled case.
123 prepare_eptp(vmx
, vm
, 0);
124 nested_map_memslot(vmx
, vm
, 0);
125 nested_map(vmx
, vm
, NESTED_TEST_MEM1
, GUEST_TEST_MEM
, 4096);
126 nested_map(vmx
, vm
, NESTED_TEST_MEM2
, GUEST_TEST_MEM
, 4096);
129 bmap
= bitmap_zalloc(TEST_MEM_PAGES
);
130 host_test_mem
= addr_gpa2hva(vm
, GUEST_TEST_MEM
);
133 memset(host_test_mem
, 0xaa, TEST_MEM_PAGES
* 4096);
135 TEST_ASSERT_KVM_EXIT_REASON(vcpu
, KVM_EXIT_IO
);
137 switch (get_ucall(vcpu
, &uc
)) {
139 REPORT_GUEST_ASSERT(uc
);
143 * The nested guest wrote at offset 0x1000 in the memslot, but the
144 * dirty bitmap must be filled in according to L1 GPA, not L2.
146 kvm_vm_get_dirty_log(vm
, TEST_MEM_SLOT_INDEX
, bmap
);
148 TEST_ASSERT(test_bit(0, bmap
), "Page 0 incorrectly reported clean");
149 TEST_ASSERT(host_test_mem
[0] == 1, "Page 0 not written by guest");
151 TEST_ASSERT(!test_bit(0, bmap
), "Page 0 incorrectly reported dirty");
152 TEST_ASSERT(host_test_mem
[0] == 0xaaaaaaaaaaaaaaaaULL
, "Page 0 written by guest");
155 TEST_ASSERT(!test_bit(1, bmap
), "Page 1 incorrectly reported dirty");
156 TEST_ASSERT(host_test_mem
[4096 / 8] == 0xaaaaaaaaaaaaaaaaULL
, "Page 1 written by guest");
157 TEST_ASSERT(!test_bit(2, bmap
), "Page 2 incorrectly reported dirty");
158 TEST_ASSERT(host_test_mem
[8192 / 8] == 0xaaaaaaaaaaaaaaaaULL
, "Page 2 written by guest");
164 TEST_FAIL("Unknown ucall %lu", uc
.cmd
);
169 int main(int argc
, char *argv
[])
171 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX
));
173 test_vmx_dirty_log(/*enable_ept=*/false);
175 if (kvm_cpu_has_ept())
176 test_vmx_dirty_log(/*enable_ept=*/true);