/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code in the public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */

#include <stdint.h>
#include <stdlib.h>

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

/* QEMU headers that provide CPUState, X86CPU and the VMCS_* field macros */
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"

#include "exec/address-spaces.h"

/* read a general-purpose guest register */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write a general-purpose guest register */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

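/*
 * Illustrative sketch (not part of the original header): a VMX capability
 * word from Hypervisor.framework keeps the must-be-one bits in its low
 * 32 bits and the may-be-one bits in its high 32 bits, so cap2ctrl() can
 * clamp a desired control word before it is written to the VMCS.  The
 * helper name below is hypothetical; hv_vmx_read_capability() and
 * HV_VMX_CAP_PINBASED come from Hypervisor.framework, VMCS_PIN_BASED_CTLS
 * from the accompanying vmcs.h.
 */
static inline void example_set_pinbased_ctls(hv_vcpuid_t vcpu, uint64_t desired)
{
    uint64_t cap = 0;

    /* low 32 bits: bits that must be set; high 32 bits: bits allowed to be set */
    hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap);
    wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, desired));
}
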
#define VM_ENTRY_GUEST_LMA (1LL << 9)

#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3

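/*
 * Switch the guest into IA-32e (long) mode: set LMA in the guest EFER,
 * request the "IA-32e mode guest" VM-entry control, and make sure TR
 * describes a busy 64-bit TSS, as required by the VM-entry checks.
 */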
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

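/*
 * Leave IA-32e (long) mode: drop the "IA-32e mode guest" VM-entry control
 * and clear LMA in the guest EFER.
 */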
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

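/*
 * Update the guest CR0.  When PAE paging is enabled without long mode, the
 * four PDPTE registers are loaded from the page-directory-pointer table that
 * CR3 points at; CR0 itself is written through the CR0 mask/shadow so that
 * CD, NE and PG stay under hypervisor control, and long mode is entered or
 * left when paging is toggled while EFER.LME is set.
 */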
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);

    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_rw(&address_space_memory,
                         rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                         MEMTXATTRS_UNSPECIFIED,
                         (uint8_t *)pdpte, 32, 0);
    }

    for (i = 0; i < 4; i++) {
        wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
    }

    wvmcs(vcpu, VMCS_CR0_MASK, CR0_CD | CR0_NE | CR0_PG);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    if (efer & MSR_EFER_LME) {
        if (!(old_cr0 & CR0_PG) && (cr0 & CR0_PG)) {
            enter_long_mode(vcpu, cr0, efer);
        }
        if (/*(old_cr0 & CR0_PG) &&*/ !(cr0 & CR0_PG)) {
            exit_long_mode(vcpu, cr0, efer);
        }
    }

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

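/*
 * Update the guest CR4.  CR4.VMXE must stay set while the vCPU runs under
 * VMX, so it is forced on in the real guest CR4 while the guest-visible
 * value (without VMXE) is kept in the CR4 read shadow.
 */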
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

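/*
 * Move the guest RIP forward after an instruction has been handled by the
 * VMM, dropping any STI/MOV-SS interruptibility blocking implied by the
 * skipped instruction.
 */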
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    uint64_t val;

    /* BUG: should take overlap into account */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);

    /* after moving RIP forward, clear any interruptibility-state blocking */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

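/*
 * The two helpers below keep the NMI blocking state in sync between QEMU's
 * view of the vCPU (HF2_NMI_MASK in env->hflags2) and the guest
 * interruptibility field in the VMCS.
 */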
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

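/*
 * Toggle "NMI-window exiting" in the primary processor-based controls so the
 * guest exits as soon as it can accept an NMI again.
 */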
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

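/*
 * Illustrative sketch (not part of the original header): how the NMI helpers
 * above could be combined on an injection path.  If the guest currently
 * blocks NMIs, request an NMI-window exit instead of injecting right away.
 * The helper name is hypothetical; VMCS_ENTRY_INTR_INFO, VMCS_INTR_T_NMI and
 * VMCS_INTR_VALID are assumed to come from the accompanying vmcs.h.
 */
static inline void example_inject_nmi(CPUState *cpu)
{
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);

    if (gi & VMCS_INTERRUPTIBILITY_NMI_BLOCKING) {
        /* guest is blocking NMIs: ask for an exit once the window opens */
        vmx_set_nmi_window_exiting(cpu);
    } else {
        /* vector 2 (NMI), NMI interruption type, valid bit set */
        wvmcs(cpu->hvf_fd, VMCS_ENTRY_INTR_INFO,
              2 | VMCS_INTR_T_NMI | VMCS_INTR_VALID);
    }
}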