/* Copyright 2008 IBM Corporation
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/hvf.h"
#include "sysemu/runstate.h"
#include "hvf-i386.h"
#include "vmcs.h"
#include "vmx.h"
#include "x86.h"
#include "x86_descr.h"
#include "x86_mmu.h"
#include "x86_decode.h"
#include "x86_emu.h"
#include "x86_task.h"
#include "x86hvf.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>

#include "exec/address-spaces.h"
#include "hw/i386/apic_internal.h"
#include "qemu/main-loop.h"
#include "sysemu/accel.h"
#include "target/i386/cpu.h"

HVFState *hvf_state;
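
/* Report a Hypervisor.framework error code and abort; HV_SUCCESS is a no-op. */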
static void assert_hvf_ok(hv_return_t ret)
{
    if (ret == HV_SUCCESS) {
        return;
    }

    switch (ret) {
    case HV_ERROR:
        error_report("Error: HV_ERROR");
        break;
    case HV_BUSY:
        error_report("Error: HV_BUSY");
        break;
    case HV_BAD_ARGUMENT:
        error_report("Error: HV_BAD_ARGUMENT");
        break;
    case HV_NO_RESOURCES:
        error_report("Error: HV_NO_RESOURCES");
        break;
    case HV_NO_DEVICE:
        error_report("Error: HV_NO_DEVICE");
        break;
    case HV_UNSUPPORTED:
        error_report("Error: HV_UNSUPPORTED");
        break;
    default:
        error_report("Unknown Error");
    }

    abort();
}
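
/*
 * Find the memory slot that overlaps [start, start + size); returns NULL
 * when the range does not overlap any registered slot.
 */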
hvf_slot *hvf_find_overlap_slot(uint64_t start, uint64_t size)
{
    hvf_slot *slot;
    int x;
    for (x = 0; x < hvf_state->num_slots; ++x) {
        slot = &hvf_state->slots[x];
        if (slot->size && start < (slot->start + slot->size) &&
            (start + size) > slot->start) {
            return slot;
        }
    }
    return NULL;
}
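
/*
 * mac_slot mirrors what is currently mapped into the VM with hv_vm_map(),
 * so a stale mapping can be torn down before its slot is reused or resized.
 */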
struct mac_slot {
    int present;
    uint64_t size;
    uint64_t gpa_start;
};

struct mac_slot mac_slots[32];

static int do_hvf_set_memory(hvf_slot *slot, hv_memory_flags_t flags)
{
    struct mac_slot *macslot;
    hv_return_t ret;

    macslot = &mac_slots[slot->slot_id];

    if (macslot->present) {
        if (macslot->size != slot->size) {
            macslot->present = 0;
            ret = hv_vm_unmap(macslot->gpa_start, macslot->size);
            assert_hvf_ok(ret);
        }
    }

    if (!slot->size) {
        return 0;
    }

    macslot->present = 1;
    macslot->gpa_start = slot->start;
    macslot->size = slot->size;
    ret = hv_vm_map((hv_uvaddr_t)slot->mem, slot->start, slot->size, flags);
    assert_hvf_ok(ret);
    return 0;
}
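
/*
 * Add or remove the HVF slot backing a MemoryRegionSection: tear down any
 * overlapping mapping first, then (re)map the region with protection flags
 * derived from whether it is RAM, a ROM device or read-only.
 */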
void hvf_set_phys_mem(MemoryRegionSection *section, bool add)
{
    hvf_slot *mem;
    MemoryRegion *area = section->mr;
    bool writeable = !area->readonly && !area->rom_device;
    hv_memory_flags_t flags;

    if (!memory_region_is_ram(area)) {
        if (writeable) {
            return;
        } else if (!memory_region_is_romd(area)) {
            /*
             * If the memory device is not in romd_mode, then we actually want
             * to remove the hvf memory slot so all accesses will trap.
             */
            add = false;
        }
    }

    mem = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    if (mem && add) {
        if (mem->size == int128_get64(section->size) &&
            mem->start == section->offset_within_address_space &&
            mem->mem == (memory_region_get_ram_ptr(area) +
            section->offset_within_region)) {
            return; /* Same region was attempted to register, go away. */
        }
    }

    /* Region needs to be reset. set the size to 0 and remap it. */
    if (mem) {
        mem->size = 0;
        if (do_hvf_set_memory(mem, 0)) {
            error_report("Failed to reset overlapping slot");
            abort();
        }
    }

    if (!add) {
        return;
    }

    if (area->readonly ||
        (!memory_region_is_ram(area) && memory_region_is_romd(area))) {
        flags = HV_MEMORY_READ | HV_MEMORY_EXEC;
    } else {
        flags = HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC;
    }

    /* Now make a new slot. */
    int x;

    for (x = 0; x < hvf_state->num_slots; ++x) {
        mem = &hvf_state->slots[x];
        if (!mem->size) {
            break;
        }
    }

    if (x == hvf_state->num_slots) {
        error_report("No free slots");
        abort();
    }

    mem->size = int128_get64(section->size);
    mem->mem = memory_region_get_ram_ptr(area) + section->offset_within_region;
    mem->start = section->offset_within_address_space;
    mem->region = area;

    if (do_hvf_set_memory(mem, flags)) {
        error_report("Error registering new memory slot");
        abort();
    }
}
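
/*
 * Mirror the emulated APIC's task-priority state into the vCPU TPR and the
 * VMCS TPR threshold before entering the guest.
 */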
void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->hvf_fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}
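
/* Read the TPR back from the vCPU after running and update the emulated APIC. */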
static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->hvf_fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff

static void hvf_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;
    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}
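
/*
 * Forward a port I/O access to QEMU's I/O address space, one element at a
 * time; direction 0 reads from the port into buffer, 1 writes buffer out.
 */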
void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size,
                         direction);
        ptr += size;
    }
}
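
/*
 * Register-state synchronisation between CPUX86State and the HVF vCPU:
 * vcpu_dirty set means QEMU's copy is authoritative and must be pushed with
 * hvf_put_registers() before the next hv_vcpu_run().
 */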
static void do_hvf_cpu_synchronize_state(CPUState *cpu, run_on_cpu_data arg)
{
    if (!cpu->vcpu_dirty) {
        hvf_get_registers(cpu);
        cpu->vcpu_dirty = true;
    }
}

void hvf_cpu_synchronize_state(CPUState *cpu)
{
    if (!cpu->vcpu_dirty) {
        run_on_cpu(cpu, do_hvf_cpu_synchronize_state, RUN_ON_CPU_NULL);
    }
}

static void do_hvf_cpu_synchronize_post_reset(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_reset(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_reset, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_post_init(CPUState *cpu,
                                             run_on_cpu_data arg)
{
    hvf_put_registers(cpu);
    cpu->vcpu_dirty = false;
}

void hvf_cpu_synchronize_post_init(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_post_init, RUN_ON_CPU_NULL);
}

static void do_hvf_cpu_synchronize_pre_loadvm(CPUState *cpu,
                                              run_on_cpu_data arg)
{
    cpu->vcpu_dirty = true;
}

void hvf_cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    run_on_cpu(cpu, do_hvf_cpu_synchronize_pre_loadvm, RUN_ON_CPU_NULL);
}
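
/*
 * Decide whether an EPT violation needs instruction emulation (MMIO) or was
 * only a write to a region that is currently being dirty-tracked.
 */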
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                          HV_MEMORY_READ | HV_MEMORY_WRITE);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}
static void hvf_set_dirty_tracking(MemoryRegionSection *section, bool on)
{
    hvf_slot *slot;

    slot = hvf_find_overlap_slot(
            section->offset_within_address_space,
            int128_get64(section->size));

    /* protect region against writes; begin tracking it */
    if (on) {
        slot->flags |= HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ);
    /* stop tracking region */
    } else {
        slot->flags &= ~HVF_SLOT_LOG;
        hv_vm_protect((hv_gpaddr_t)slot->start, (size_t)slot->size,
                      HV_MEMORY_READ | HV_MEMORY_WRITE);
    }
}

static void hvf_log_start(MemoryListener *listener,
                          MemoryRegionSection *section, int old, int new)
{
    if (old != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 1);
}

static void hvf_log_stop(MemoryListener *listener,
                         MemoryRegionSection *section, int old, int new)
{
    if (new != 0) {
        return;
    }

    hvf_set_dirty_tracking(section, 0);
}

static void hvf_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    /*
     * sync of dirty pages is handled elsewhere; just make sure we keep
     * tracking the region.
     */
    hvf_set_dirty_tracking(section, 1);
}

static void hvf_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, true);
}

static void hvf_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hvf_set_phys_mem(section, false);
}
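
/* Listener that keeps HVF slots and dirty tracking in sync with the memory map. */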
static MemoryListener hvf_memory_listener = {
    .priority = 10,
    .region_add = hvf_region_add,
    .region_del = hvf_region_del,
    .log_start = hvf_log_start,
    .log_stop = hvf_log_stop,
    .log_sync = hvf_log_sync,
};
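
/* Tear down the HVF vCPU and release its MMIO scratch buffer. */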
void hvf_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    hv_return_t ret = hv_vcpu_destroy((hv_vcpuid_t)cpu->hvf_fd);
    g_free(env->hvf_mmio_buf);
    assert_hvf_ok(ret);
}

static void dummy_signal(int sig)
{
}
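
/*
 * Create and configure an HVF vCPU: set up the SIG_IPI handler, read the VMX
 * capabilities, program the VMCS execution controls and let the guest access
 * the fast-path MSRs directly.
 */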
int hvf_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    int r;

    /* init cpu signals */
    sigset_t set;
    struct sigaction sigact;

    memset(&sigact, 0, sizeof(sigact));
    sigact.sa_handler = dummy_signal;
    sigaction(SIG_IPI, &sigact, NULL);

    pthread_sigmask(SIG_BLOCK, NULL, &set);
    sigdelset(&set, SIG_IPI);

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_mmio_buf = g_new(char, 4096);

    r = hv_vcpu_create((hv_vcpuid_t *)&cpu->hvf_fd, HV_VCPU_DEFAULT);
    cpu->vcpu_dirty = 1;
    assert_hvf_ok(r);

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
        &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
        &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
        &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
        &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }

    /* set VMCS control fields */
    wvmcs(cpu->hvf_fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
          VMCS_PIN_BASED_CTLS_EXTINT |
          VMCS_PIN_BASED_CTLS_NMI |
          VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
          VMCS_PRI_PROC_BASED_CTLS_HLT |
          VMCS_PRI_PROC_BASED_CTLS_MWAIT |
          VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
          VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);
    wvmcs(cpu->hvf_fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2,
                   VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES));

    wvmcs(cpu->hvf_fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->hvf_fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->hvf_fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf = qemu_memalign(4096, 4096);

    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->hvf_fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
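
/*
 * Record interrupt/exception state left pending at VM-exit (IDT vectoring
 * info, NMI and interrupt shadow) into CPUX86State so it can be re-injected
 * on the next entry.
 */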
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
        VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}
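
/*
 * Main vCPU run loop: push dirty register state, enter the guest with
 * hv_vcpu_run() and dispatch on the VM-exit reason until an event needs to
 * be handled by the main loop.
 */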
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->vcpu_dirty) {
            hvf_put_registers(cpu);
            cpu->vcpu_dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        qemu_mutex_unlock_iothread();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            qemu_mutex_lock_iothread();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run(cpu->hvf_fd);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->hvf_fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->hvf_fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->hvf_fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->hvf_fd, HV_X86_RIP);
        env->eflags = rreg(cpu->hvf_fd, HV_X86_RFLAGS);

        qemu_mutex_lock_iothread();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->hvf_fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;
                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->hvf_fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->hvf_fd, HV_X86_RAX, rax);
            wreg(cpu->hvf_fd, HV_X86_RBX, rbx);
            wreg(cpu->hvf_fd, HV_X86_RCX, rcx);
            wreg(cpu->hvf_fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->hvf_fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->hvf_fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(cpu);
            } else {
                simulate_wrmsr(cpu);
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->hvf_fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            RIP(env) += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->hvf_fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->hvf_fd, HV_X86_RAX, 0);
            wreg(cpu->hvf_fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}
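
/*
 * Accelerator initialisation: create the VM, initialise the slot table and
 * hook HVF into the interrupt and memory-listener machinery.
 */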
bool hvf_allowed;

static int hvf_accel_init(MachineState *ms)
{
    int x;
    hv_return_t ret;
    HVFState *s;

    ret = hv_vm_create(HV_VM_DEFAULT);
    assert_hvf_ok(ret);

    s = g_new0(HVFState, 1);

    s->num_slots = 32;
    for (x = 0; x < s->num_slots; ++x) {
        s->slots[x].size = 0;
        s->slots[x].slot_id = x;
    }

    hvf_state = s;
    cpu_interrupt_handler = hvf_handle_interrupt;
    memory_listener_register(&hvf_memory_listener, &address_space_memory);
    return 0;
}
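
/* QOM glue that registers HVF as a QEMU accelerator type. */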
static void hvf_accel_class_init(ObjectClass *oc, void *data)
{
    AccelClass *ac = ACCEL_CLASS(oc);
    ac->name = "HVF";
    ac->init_machine = hvf_accel_init;
    ac->allowed = &hvf_allowed;
}

static const TypeInfo hvf_accel_type = {
    .name = TYPE_HVF_ACCEL,
    .parent = TYPE_ACCEL,
    .class_init = hvf_accel_class_init,
};

static void hvf_type_init(void)
{
    type_register_static(&hvf_accel_type);
}

type_init(hvf_type_init);