/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code in the public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */
#ifndef VMX_H
#define VMX_H

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"

#include "exec/address-spaces.h"
/* read GPR; abort on hypervisor error */
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}
/* write GPR; abort on hypervisor error */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}
/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}
/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}
/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}
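/*
 * Usage sketch (added, illustrative): in a VMX capability value as returned
 * by hv_vmx_read_capability(), the low 32 bits hold the required-1 control
 * bits and the high 32 bits the allowed-1 bits, so cap2ctrl() clamps a
 * desired control word to what the host supports. For a hypothetical
 * desired pin-based control value `desired`:
 *
 *     uint64_t cap;
 *     if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap) == HV_SUCCESS) {
 *         wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, desired));
 *     }
 */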
#define VM_ENTRY_GUEST_LMA (1LL << 9)

#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    /* VM entry in IA-32e mode requires TR to hold a busy 64-bit TSS */
    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}
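/*
 * Note (added): architecturally EFER.LME enables long mode and the CPU
 * derives EFER.LMA from LME && CR0.PG. The two helpers above mirror that
 * for the guest, updating EFER.LMA together with the "IA-32e mode guest"
 * VM-entry control (VM_ENTRY_GUEST_LMA) whenever paging is toggled while
 * LME is set.
 */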
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t changed_cr0 = old_cr0 ^ cr0;
    uint64_t mask = CR0_PG_MASK | CR0_CD_MASK | CR0_NW_MASK |
                    CR0_NE_MASK | CR0_ET_MASK;
    uint64_t entry_ctls;

    /* PAE paging without long mode: load the PDPTEs pointed to by CR3 */
    if ((cr0 & CR0_PG_MASK) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE_MASK) &&
        !(efer & MSR_EFER_LME)) {
        address_space_read(&address_space_memory,
                           rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                           MEMTXATTRS_UNSPECIFIED, pdpte, 32);
        /*
         * Only set PDPTEs when appropriate; 64-bit VMCS field encodings
         * are spaced two apart, hence the i * 2.
         */
        for (i = 0; i < 4; i++) {
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (changed_cr0 & CR0_PG_MASK) {
            if (cr0 & CR0_PG_MASK) {
                enter_long_mode(vcpu, cr0, efer);
            } else {
                exit_long_mode(vcpu, cr0, efer);
            }
        }
    } else {
        entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
        wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG_MASK));
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE_MASK | CR0_ET_MASK);

    hv_vcpu_invalidate_tlb(vcpu);
}
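/*
 * Note (added): bits set in VMCS_CR0_MASK are host-owned; guest reads of
 * those CR0 bits are served from VMCS_CR0_SHADOW, and guest writes to them
 * cause a CR-access VM exit. A handler for such an exit would pass the
 * newly written value straight through, e.g. with a hypothetical decoded
 * value `new_cr0`:
 *
 *     macvm_set_cr0(cpu->accel->fd, new_cr0);
 */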
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE_MASK;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE_MASK);

    hv_vcpu_invalidate_tlb(vcpu);
}
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t val;

    /* BUG: should take overlap into account */
    wreg(cpu->accel->fd, HV_X86_RIP, rip);
    env->eip = rip;

    /* after moving RIP forward, we need to clear the interruptibility state */
    val = rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}
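/*
 * Usage sketch (added, illustrative): after emulating an instruction, an
 * exit handler advances the guest past it using the instruction length
 * reported for the exit:
 *
 *     uint64_t rip = rreg(cpu->accel->fd, HV_X86_RIP);
 *     uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd,
 *                                        VMCS_EXIT_INSTRUCTION_LENGTH);
 *     macvm_set_rip(cpu, rip + ins_len);
 */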
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}
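/*
 * Note (added): the intended pattern for the window-exiting helpers below
 * is that when an NMI is pending while the guest still blocks NMIs, the
 * injection path sets NMI-window exiting so the vCPU exits as soon as
 * delivery becomes possible, injects the NMI, and clears the control again.
 */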
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif /* VMX_H */