/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */
28 #include <Hypervisor/hv.h>
29 #include <Hypervisor/hv_vmx.h>
33 #include "sysemu/hvf.h"
34 #include "sysemu/hvf_int.h"
36 #include "exec/address-spaces.h"
38 static inline uint64_t rreg(hv_vcpuid_t vcpu
, hv_x86_reg_t reg
)
42 if (hv_vcpu_read_register(vcpu
, reg
, &v
)) {
50 static inline void wreg(hv_vcpuid_t vcpu
, hv_x86_reg_t reg
, uint64_t v
)
52 if (hv_vcpu_write_register(vcpu
, reg
, v
)) {
58 static inline uint64_t rvmcs(hv_vcpuid_t vcpu
, uint32_t field
)
62 hv_vmx_vcpu_read_vmcs(vcpu
, field
, &v
);
67 /* write VMCS field */
68 static inline void wvmcs(hv_vcpuid_t vcpu
, uint32_t field
, uint64_t v
)
70 hv_vmx_vcpu_write_vmcs(vcpu
, field
, v
);
/*
 * Desired control word constrained by hardware/hypervisor capabilities.
 * Low 32 bits of cap are the must-be-one bits, high 32 bits the
 * allowed-one bits: force the former on, then clear anything the
 * hardware does not permit.
 */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    uint64_t must_be_one = cap & 0xffffffff;
    uint64_t allowed_one = cap >> 32;

    return (ctrl | must_be_one) & allowed_one;
}
/* VM-entry controls: "IA-32e mode guest" bit */
#define VM_ENTRY_GUEST_LMA (1LL << 9)

/* Segment access-rights "type" field bits and TSS type encodings */
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
91 static void enter_long_mode(hv_vcpuid_t vcpu
, uint64_t cr0
, uint64_t efer
)
96 wvmcs(vcpu
, VMCS_GUEST_IA32_EFER
, efer
);
97 entry_ctls
= rvmcs(vcpu
, VMCS_ENTRY_CTLS
);
98 wvmcs(vcpu
, VMCS_ENTRY_CTLS
, rvmcs(vcpu
, VMCS_ENTRY_CTLS
) |
101 uint64_t guest_tr_ar
= rvmcs(vcpu
, VMCS_GUEST_TR_ACCESS_RIGHTS
);
102 if ((efer
& MSR_EFER_LME
) &&
103 (guest_tr_ar
& AR_TYPE_MASK
) != AR_TYPE_BUSY_64_TSS
) {
104 wvmcs(vcpu
, VMCS_GUEST_TR_ACCESS_RIGHTS
,
105 (guest_tr_ar
& ~AR_TYPE_MASK
) | AR_TYPE_BUSY_64_TSS
);
109 static void exit_long_mode(hv_vcpuid_t vcpu
, uint64_t cr0
, uint64_t efer
)
113 entry_ctls
= rvmcs(vcpu
, VMCS_ENTRY_CTLS
);
114 wvmcs(vcpu
, VMCS_ENTRY_CTLS
, entry_ctls
& ~VM_ENTRY_GUEST_LMA
);
116 efer
&= ~MSR_EFER_LMA
;
117 wvmcs(vcpu
, VMCS_GUEST_IA32_EFER
, efer
);
120 static inline void macvm_set_cr0(hv_vcpuid_t vcpu
, uint64_t cr0
)
123 uint64_t pdpte
[4] = {0, 0, 0, 0};
124 uint64_t efer
= rvmcs(vcpu
, VMCS_GUEST_IA32_EFER
);
125 uint64_t old_cr0
= rvmcs(vcpu
, VMCS_GUEST_CR0
);
126 uint64_t changed_cr0
= old_cr0
^ cr0
;
127 uint64_t mask
= CR0_PG_MASK
| CR0_CD_MASK
| CR0_NW_MASK
|
128 CR0_NE_MASK
| CR0_ET_MASK
;
131 if ((cr0
& CR0_PG_MASK
) && (rvmcs(vcpu
, VMCS_GUEST_CR4
) & CR4_PAE_MASK
) &&
132 !(efer
& MSR_EFER_LME
)) {
133 address_space_read(&address_space_memory
,
134 rvmcs(vcpu
, VMCS_GUEST_CR3
) & ~0x1f,
135 MEMTXATTRS_UNSPECIFIED
, pdpte
, 32);
136 /* Only set PDPTE when appropriate. */
137 for (i
= 0; i
< 4; i
++) {
138 wvmcs(vcpu
, VMCS_GUEST_PDPTE0
+ i
* 2, pdpte
[i
]);
142 wvmcs(vcpu
, VMCS_CR0_MASK
, mask
);
143 wvmcs(vcpu
, VMCS_CR0_SHADOW
, cr0
);
145 if (efer
& MSR_EFER_LME
) {
146 if (changed_cr0
& CR0_PG_MASK
) {
147 if (cr0
& CR0_PG_MASK
) {
148 enter_long_mode(vcpu
, cr0
, efer
);
150 exit_long_mode(vcpu
, cr0
, efer
);
154 entry_ctls
= rvmcs(vcpu
, VMCS_ENTRY_CTLS
);
155 wvmcs(vcpu
, VMCS_ENTRY_CTLS
, entry_ctls
& ~VM_ENTRY_GUEST_LMA
);
158 /* Filter new CR0 after we are finished examining it above. */
159 cr0
= (cr0
& ~(mask
& ~CR0_PG_MASK
));
160 wvmcs(vcpu
, VMCS_GUEST_CR0
, cr0
| CR0_NE_MASK
| CR0_ET_MASK
);
162 hv_vcpu_invalidate_tlb(vcpu
);
165 static inline void macvm_set_cr4(hv_vcpuid_t vcpu
, uint64_t cr4
)
167 uint64_t guest_cr4
= cr4
| CR4_VMXE_MASK
;
169 wvmcs(vcpu
, VMCS_GUEST_CR4
, guest_cr4
);
170 wvmcs(vcpu
, VMCS_CR4_SHADOW
, cr4
);
171 wvmcs(vcpu
, VMCS_CR4_MASK
, CR4_VMXE_MASK
);
173 hv_vcpu_invalidate_tlb(vcpu
);
176 static inline void macvm_set_rip(CPUState
*cpu
, uint64_t rip
)
178 X86CPU
*x86_cpu
= X86_CPU(cpu
);
179 CPUX86State
*env
= &x86_cpu
->env
;
182 /* BUG, should take considering overlap.. */
183 wreg(cpu
->hvf
->fd
, HV_X86_RIP
, rip
);
186 /* after moving forward in rip, we need to clean INTERRUPTABILITY */
187 val
= rvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
);
188 if (val
& (VMCS_INTERRUPTIBILITY_STI_BLOCKING
|
189 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING
)) {
190 env
->hflags
&= ~HF_INHIBIT_IRQ_MASK
;
191 wvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
,
192 val
& ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING
|
193 VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING
));
197 static inline void vmx_clear_nmi_blocking(CPUState
*cpu
)
199 X86CPU
*x86_cpu
= X86_CPU(cpu
);
200 CPUX86State
*env
= &x86_cpu
->env
;
202 env
->hflags2
&= ~HF2_NMI_MASK
;
203 uint32_t gi
= (uint32_t) rvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
);
204 gi
&= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING
;
205 wvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
, gi
);
208 static inline void vmx_set_nmi_blocking(CPUState
*cpu
)
210 X86CPU
*x86_cpu
= X86_CPU(cpu
);
211 CPUX86State
*env
= &x86_cpu
->env
;
213 env
->hflags2
|= HF2_NMI_MASK
;
214 uint32_t gi
= (uint32_t)rvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
);
215 gi
|= VMCS_INTERRUPTIBILITY_NMI_BLOCKING
;
216 wvmcs(cpu
->hvf
->fd
, VMCS_GUEST_INTERRUPTIBILITY
, gi
);
219 static inline void vmx_set_nmi_window_exiting(CPUState
*cpu
)
222 val
= rvmcs(cpu
->hvf
->fd
, VMCS_PRI_PROC_BASED_CTLS
);
223 wvmcs(cpu
->hvf
->fd
, VMCS_PRI_PROC_BASED_CTLS
, val
|
224 VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING
);
228 static inline void vmx_clear_nmi_window_exiting(CPUState
*cpu
)
232 val
= rvmcs(cpu
->hvf
->fd
, VMCS_PRI_PROC_BASED_CTLS
);
233 wvmcs(cpu
->hvf
->fd
, VMCS_PRI_PROC_BASED_CTLS
, val
&
234 ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING
);