/*
 * tools/testing/selftests/kvm/lib/vmx.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */
#define _GNU_SOURCE /* for program_invocation_name */

#include "test_util.h"
#include "kvm_util.h"
#include "x86.h"
#include "vmx.h"
/*
 * Allocate memory regions for nested VMX tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_vmx_gva - The guest virtual address for the struct vmx_pages.
 *
 * Return:
 *   Pointer to structure with the addresses of the VMX areas.
 */
struct vmx_pages *
vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva)
{
	vm_vaddr_t vmx_gva = vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	struct vmx_pages *vmx = addr_gva2hva(vm, vmx_gva);

	/* Set up a region of guest memory for the vmxon region. */
	vmx->vmxon = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmxon_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmxon);
	vmx->vmxon_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmxon);

	/* Set up a region of guest memory for a vmcs. */
	vmx->vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmcs);
	vmx->vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmcs);

	/* Set up a region of guest memory for the MSR bitmap. */
	vmx->msr = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->msr_hva = addr_gva2hva(vm, (uintptr_t)vmx->msr);
	vmx->msr_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->msr);
	memset(vmx->msr_hva, 0, getpagesize());

	/* Set up a region of guest memory for the shadow VMCS. */
	vmx->shadow_vmcs = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->shadow_vmcs_hva = addr_gva2hva(vm, (uintptr_t)vmx->shadow_vmcs);
	vmx->shadow_vmcs_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->shadow_vmcs);

	/* Set up regions of guest memory for the VMREAD and VMWRITE bitmaps. */
	vmx->vmread = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmread_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmread);
	vmx->vmread_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmread);
	memset(vmx->vmread_hva, 0, getpagesize());

	vmx->vmwrite = (void *)vm_vaddr_alloc(vm, getpagesize(), 0x10000, 0, 0);
	vmx->vmwrite_hva = addr_gva2hva(vm, (uintptr_t)vmx->vmwrite);
	vmx->vmwrite_gpa = addr_gva2gpa(vm, (uintptr_t)vmx->vmwrite);
	memset(vmx->vmwrite_hva, 0, getpagesize());

	*p_vmx_gva = vmx_gva;
	return vmx;
}
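
/*
 * Example usage (an illustrative sketch, not part of this file): a nested
 * VMX test allocates the vmx_pages in L1's address space and hands the
 * returned GVA to the guest as a vCPU argument. VCPU_ID is a hypothetical
 * name here; vcpu_args_set() is the selftest helper for passing arguments.
 *
 *	vm_vaddr_t vmx_pages_gva;
 *
 *	vcpu_alloc_vmx(vm, &vmx_pages_gva);
 *	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
 */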

bool prepare_for_vmx_operation(struct vmx_pages *vmx)
{
	uint64_t feature_control;
	uint64_t required;
	unsigned long cr0;
	unsigned long cr4;

	/*
	 * Ensure bits in CR0 and CR4 are valid in VMX operation:
	 * - Bit X is 1 in _FIXED0: bit X is fixed to 1 in CRx.
	 * - Bit X is 0 in _FIXED1: bit X is fixed to 0 in CRx.
	 */
	__asm__ __volatile__("mov %%cr0, %0" : "=r"(cr0) : : "memory");
	cr0 &= rdmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= rdmsr(MSR_IA32_VMX_CR0_FIXED0);
	__asm__ __volatile__("mov %0, %%cr0" : : "r"(cr0) : "memory");

	__asm__ __volatile__("mov %%cr4, %0" : "=r"(cr4) : : "memory");
	cr4 &= rdmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= rdmsr(MSR_IA32_VMX_CR4_FIXED0);
	/* Enable VMX operation */
	cr4 |= X86_CR4_VMXE;
	__asm__ __volatile__("mov %0, %%cr4" : : "r"(cr4) : "memory");
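
	/*
	 * Worked example of the fixed-bit arithmetic above, with illustrative
	 * values (not read from real hardware): if CR4_FIXED0 = 0x2000 and
	 * CR4_FIXED1 = ~0ul, the AND with FIXED1 clears nothing and the OR
	 * with FIXED0 forces bit 13 (X86_CR4_VMXE) to 1, so both constraints
	 * hold.
	 */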

	/*
	 * Configure IA32_FEATURE_CONTROL MSR to allow VMXON:
	 *  Bit 0: Lock bit. If clear, VMXON causes a #GP.
	 *  Bit 2: Enables VMXON outside of SMX operation. If clear, VMXON
	 *    outside of SMX causes a #GP.
	 */
	required = FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX;
	required |= FEATURE_CONTROL_LOCKED;
	feature_control = rdmsr(MSR_IA32_FEATURE_CONTROL);
	if ((feature_control & required) != required)
		wrmsr(MSR_IA32_FEATURE_CONTROL, feature_control | required);
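
	/*
	 * Note (an assumption about the test environment, not stated in the
	 * original): once the lock bit is set, IA32_FEATURE_CONTROL is
	 * read-only until reset, so the wrmsr() above can only succeed if
	 * KVM presents the MSR unlocked or already configured to permit
	 * VMXON outside SMX.
	 */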

	/* Enter VMX root operation. */
	*(uint32_t *)(vmx->vmxon) = vmcs_revision();
	if (vmxon(vmx->vmxon_gpa))
		return false;

	/* Load a VMCS. */
	*(uint32_t *)(vmx->vmcs) = vmcs_revision();
	if (vmclear(vmx->vmcs_gpa))
		return false;

	if (vmptrld(vmx->vmcs_gpa))
		return false;

	/* Set up the shadow VMCS (bit 31 marks it as shadow); do not load it yet. */
	*(uint32_t *)(vmx->shadow_vmcs) = vmcs_revision() | 0x80000000ul;
	if (vmclear(vmx->shadow_vmcs_gpa))
		return false;

	return true;
}

/*
 * Initialize the control fields to the most basic settings possible.
 */
static inline void init_vmcs_control_fields(struct vmx_pages *vmx)
{
	vmwrite(VIRTUAL_PROCESSOR_ID, 0);
	vmwrite(POSTED_INTR_NV, 0);

	vmwrite(PIN_BASED_VM_EXEC_CONTROL, rdmsr(MSR_IA32_VMX_TRUE_PINBASED_CTLS));
	/*
	 * vmwrite() returns 0 on success, so a successful write to the
	 * secondary controls field means the field exists and must be
	 * activated via the primary controls.
	 */
	if (!vmwrite(SECONDARY_VM_EXEC_CONTROL, 0))
		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS) |
			CPU_BASED_ACTIVATE_SECONDARY_CONTROLS);
	else
		vmwrite(CPU_BASED_VM_EXEC_CONTROL,
			rdmsr(MSR_IA32_VMX_TRUE_PROCBASED_CTLS));
	vmwrite(EXCEPTION_BITMAP, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, -1); /* Never match */
	vmwrite(CR3_TARGET_COUNT, 0);
	vmwrite(VM_EXIT_CONTROLS, rdmsr(MSR_IA32_VMX_EXIT_CTLS) |
		VM_EXIT_HOST_ADDR_SPACE_SIZE);	  /* 64-bit host */
	vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_CONTROLS, rdmsr(MSR_IA32_VMX_ENTRY_CTLS) |
		VM_ENTRY_IA32E_MODE);		  /* 64-bit guest */
	vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	vmwrite(TPR_THRESHOLD, 0);

	vmwrite(CR0_GUEST_HOST_MASK, 0);
	vmwrite(CR4_GUEST_HOST_MASK, 0);
	vmwrite(CR0_READ_SHADOW, get_cr0());
	vmwrite(CR4_READ_SHADOW, get_cr4());

	vmwrite(MSR_BITMAP, vmx->msr_gpa);
	vmwrite(VMREAD_BITMAP, vmx->vmread_gpa);
	vmwrite(VMWRITE_BITMAP, vmx->vmwrite_gpa);
}
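
/*
 * Example (an illustrative sketch, not part of this file): after
 * prepare_vmcs(), a test can tighten individual controls on top of these
 * basic settings, e.g. to make L2's HLT cause a VM-exit to L1:
 *
 *	vmwrite(CPU_BASED_VM_EXEC_CONTROL,
 *		vmreadz(CPU_BASED_VM_EXEC_CONTROL) | CPU_BASED_HLT_EXITING);
 */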

/*
 * Initialize the host state fields based on the current host state, with
 * the exception of HOST_RSP and HOST_RIP, which should be set by vmlaunch
 * or vmresume.
 */
static inline void init_vmcs_host_state(void)
{
	uint32_t exit_controls = vmreadz(VM_EXIT_CONTROLS);

	vmwrite(HOST_ES_SELECTOR, get_es());
	vmwrite(HOST_CS_SELECTOR, get_cs());
	vmwrite(HOST_SS_SELECTOR, get_ss());
	vmwrite(HOST_DS_SELECTOR, get_ds());
	vmwrite(HOST_FS_SELECTOR, get_fs());
	vmwrite(HOST_GS_SELECTOR, get_gs());
	vmwrite(HOST_TR_SELECTOR, get_tr());

	if (exit_controls & VM_EXIT_LOAD_IA32_PAT)
		vmwrite(HOST_IA32_PAT, rdmsr(MSR_IA32_CR_PAT));
	if (exit_controls & VM_EXIT_LOAD_IA32_EFER)
		vmwrite(HOST_IA32_EFER, rdmsr(MSR_EFER));
	if (exit_controls & VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL)
		vmwrite(HOST_IA32_PERF_GLOBAL_CTRL,
			rdmsr(MSR_CORE_PERF_GLOBAL_CTRL));

	vmwrite(HOST_IA32_SYSENTER_CS, rdmsr(MSR_IA32_SYSENTER_CS));

	vmwrite(HOST_CR0, get_cr0());
	vmwrite(HOST_CR3, get_cr3());
	vmwrite(HOST_CR4, get_cr4());
	vmwrite(HOST_FS_BASE, rdmsr(MSR_FS_BASE));
	vmwrite(HOST_GS_BASE, rdmsr(MSR_GS_BASE));
	vmwrite(HOST_TR_BASE,
		get_desc64_base((struct desc64 *)(get_gdt_base() + get_tr())));
	vmwrite(HOST_GDTR_BASE, get_gdt_base());
	vmwrite(HOST_IDTR_BASE, get_idt_base());
	vmwrite(HOST_IA32_SYSENTER_ESP, rdmsr(MSR_IA32_SYSENTER_ESP));
	vmwrite(HOST_IA32_SYSENTER_EIP, rdmsr(MSR_IA32_SYSENTER_EIP));
}

/*
 * Initialize the guest state fields essentially as a clone of the host
 * state fields. Some host state fields have fixed values, and we set the
 * corresponding guest state fields accordingly.
 */
static inline void init_vmcs_guest_state(void *rip, void *rsp)
{
	vmwrite(GUEST_ES_SELECTOR, vmreadz(HOST_ES_SELECTOR));
	vmwrite(GUEST_CS_SELECTOR, vmreadz(HOST_CS_SELECTOR));
	vmwrite(GUEST_SS_SELECTOR, vmreadz(HOST_SS_SELECTOR));
	vmwrite(GUEST_DS_SELECTOR, vmreadz(HOST_DS_SELECTOR));
	vmwrite(GUEST_FS_SELECTOR, vmreadz(HOST_FS_SELECTOR));
	vmwrite(GUEST_GS_SELECTOR, vmreadz(HOST_GS_SELECTOR));
	vmwrite(GUEST_LDTR_SELECTOR, 0);
	vmwrite(GUEST_TR_SELECTOR, vmreadz(HOST_TR_SELECTOR));
	vmwrite(GUEST_INTR_STATUS, 0);
	vmwrite(GUEST_PML_INDEX, 0);

	vmwrite(VMCS_LINK_POINTER, -1ll);	/* no linked shadow VMCS */
	vmwrite(GUEST_IA32_DEBUGCTL, 0);
	vmwrite(GUEST_IA32_PAT, vmreadz(HOST_IA32_PAT));
	vmwrite(GUEST_IA32_EFER, vmreadz(HOST_IA32_EFER));
	vmwrite(GUEST_IA32_PERF_GLOBAL_CTRL,
		vmreadz(HOST_IA32_PERF_GLOBAL_CTRL));

	vmwrite(GUEST_ES_LIMIT, -1);
	vmwrite(GUEST_CS_LIMIT, -1);
	vmwrite(GUEST_SS_LIMIT, -1);
	vmwrite(GUEST_DS_LIMIT, -1);
	vmwrite(GUEST_FS_LIMIT, -1);
	vmwrite(GUEST_GS_LIMIT, -1);
	vmwrite(GUEST_LDTR_LIMIT, -1);
	vmwrite(GUEST_TR_LIMIT, 0x67);
	vmwrite(GUEST_GDTR_LIMIT, 0xffff);
	vmwrite(GUEST_IDTR_LIMIT, 0xffff);
	vmwrite(GUEST_ES_AR_BYTES,
		vmreadz(GUEST_ES_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_CS_AR_BYTES, 0xa09b);
	vmwrite(GUEST_SS_AR_BYTES, 0xc093);
	vmwrite(GUEST_DS_AR_BYTES,
		vmreadz(GUEST_DS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_FS_AR_BYTES,
		vmreadz(GUEST_FS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_GS_AR_BYTES,
		vmreadz(GUEST_GS_SELECTOR) == 0 ? 0x10000 : 0xc093);
	vmwrite(GUEST_LDTR_AR_BYTES, 0x10000);	/* unusable */
	vmwrite(GUEST_TR_AR_BYTES, 0x8b);	/* present, busy TSS */
	vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	vmwrite(GUEST_ACTIVITY_STATE, 0);
	vmwrite(GUEST_SYSENTER_CS, vmreadz(HOST_IA32_SYSENTER_CS));
	vmwrite(VMX_PREEMPTION_TIMER_VALUE, 0);

	vmwrite(GUEST_CR0, vmreadz(HOST_CR0));
	vmwrite(GUEST_CR3, vmreadz(HOST_CR3));
	vmwrite(GUEST_CR4, vmreadz(HOST_CR4));
	vmwrite(GUEST_ES_BASE, 0);
	vmwrite(GUEST_CS_BASE, 0);
	vmwrite(GUEST_SS_BASE, 0);
	vmwrite(GUEST_DS_BASE, 0);
	vmwrite(GUEST_FS_BASE, vmreadz(HOST_FS_BASE));
	vmwrite(GUEST_GS_BASE, vmreadz(HOST_GS_BASE));
	vmwrite(GUEST_LDTR_BASE, 0);
	vmwrite(GUEST_TR_BASE, vmreadz(HOST_TR_BASE));
	vmwrite(GUEST_GDTR_BASE, vmreadz(HOST_GDTR_BASE));
	vmwrite(GUEST_IDTR_BASE, vmreadz(HOST_IDTR_BASE));
	vmwrite(GUEST_DR7, 0x400);
	vmwrite(GUEST_RSP, (uint64_t)rsp);
	vmwrite(GUEST_RIP, (uint64_t)rip);
	vmwrite(GUEST_RFLAGS, 2);	/* bit 1 is reserved and must be 1 */
	vmwrite(GUEST_PENDING_DBG_EXCEPTIONS, 0);
	vmwrite(GUEST_SYSENTER_ESP, vmreadz(HOST_IA32_SYSENTER_ESP));
	vmwrite(GUEST_SYSENTER_EIP, vmreadz(HOST_IA32_SYSENTER_EIP));
}

void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp)
{
	init_vmcs_control_fields(vmx);
	init_vmcs_host_state();
	init_vmcs_guest_state(guest_rip, guest_rsp);
}
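
/*
 * Example L1 guest flow (an illustrative sketch, not part of this file):
 * the guest receives the vmx_pages pointer, enters VMX root operation,
 * builds the VMCS, and launches L2. l2_guest_code, l2_guest_stack, and
 * L2_GUEST_STACK_SIZE are hypothetical names.
 *
 *	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
 *	prepare_vmcs(vmx, l2_guest_code,
 *		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *	GUEST_ASSERT(!vmlaunch());
 */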