/* Copyright 2008 IBM Corporation
 * Copyright 2011 Intel Corporation
 * Copyright 2016 Veertu, Inc.
 * Copyright 2017 The Android Open Source Project
 *
 * QEMU Hypervisor.framework support
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 *
 * Parts Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

49 #include "qemu/osdep.h"
50 #include "qemu/error-report.h"
51 #include "qemu/memalign.h"
52 #include "qapi/error.h"
53 #include "migration/blocker.h"
55 #include "sysemu/hvf.h"
56 #include "sysemu/hvf_int.h"
57 #include "sysemu/runstate.h"
58 #include "sysemu/cpus.h"
63 #include "x86_descr.h"
65 #include "x86_decode.h"
70 #include <Hypervisor/hv.h>
71 #include <Hypervisor/hv_vmx.h>
72 #include <sys/sysctl.h>
74 #include "hw/i386/apic_internal.h"
75 #include "qemu/main-loop.h"
76 #include "qemu/accel.h"
77 #include "target/i386/cpu.h"
static Error *invtsc_mig_blocker;

void vmx_update_tpr(CPUState *cpu)
{
    /* TODO: need to integrate APIC handling */
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = cpu_get_apic_tpr(x86_cpu->apic_state) << 4;
    int irr = apic_get_highest_priority_irr(x86_cpu->apic_state);

    wreg(cpu->accel->fd, HV_X86_TPR, tpr);
    if (irr == -1) {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);
    } else {
        wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, (irr > tpr) ? tpr >> 4 :
              irr >> 4);
    }
}
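
/*
 * Note: vmx_update_tpr() pushes the emulated APIC's task priority into the
 * vCPU before entry, and update_apic_tpr() below copies it back out after
 * an exit. Writing VMCS_TPR_THRESHOLD asks the hardware to exit when the
 * guest drops its TPR below the priority class of the highest pending
 * interrupt, giving QEMU a chance to inject it.
 */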
static void update_apic_tpr(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    int tpr = rreg(cpu->accel->fd, HV_X86_TPR) >> 4;
    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
}

#define VECTORING_INFO_VECTOR_MASK 0xff

void hvf_handle_io(CPUArchState *env, uint16_t port, void *buffer,
                   int direction, int size, int count)
{
    int i;
    uint8_t *ptr = buffer;

    for (i = 0; i < count; i++) {
        address_space_rw(&address_space_io, port, MEMTXATTRS_UNSPECIFIED,
                         ptr, size, direction);
        ptr += size;
    }
}
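
/*
 * Note: "direction" follows address_space_rw()'s is_write flag, so 1 means
 * an OUT (buffer -> device) and 0 means an IN (device -> buffer); the
 * EXIT_REASON_INOUT handler below calls this helper accordingly.
 */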
static bool ept_emulation_fault(hvf_slot *slot, uint64_t gpa, uint64_t ept_qual)
{
    int read, write;

    /* EPT fault on an instruction fetch doesn't make sense here */
    if (ept_qual & EPT_VIOLATION_INST_FETCH) {
        return false;
    }

    /* EPT fault must be a read fault or a write fault */
    read = ept_qual & EPT_VIOLATION_DATA_READ ? 1 : 0;
    write = ept_qual & EPT_VIOLATION_DATA_WRITE ? 1 : 0;
    if ((read | write) == 0) {
        return false;
    }

    if (write && slot) {
        if (slot->flags & HVF_SLOT_LOG) {
            uint64_t dirty_page_start = gpa & ~(TARGET_PAGE_SIZE - 1u);
            memory_region_set_dirty(slot->region, gpa - slot->start, 1);
            hv_vm_protect(dirty_page_start, TARGET_PAGE_SIZE,
                          HV_MEMORY_READ | HV_MEMORY_WRITE | HV_MEMORY_EXEC);
        }
    }

    /*
     * The EPT violation must have been caused by accessing a
     * guest-physical address that is a translation of a guest-linear
     * address.
     */
    if ((ept_qual & EPT_VIOLATION_GLA_VALID) == 0 ||
        (ept_qual & EPT_VIOLATION_XLAT_VALID) == 0) {
        return false;
    }

    if (!slot) {
        return true;
    }
    if (!memory_region_is_ram(slot->region) &&
        !(read && memory_region_is_romd(slot->region))) {
        return true;
    }
    return false;
}
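
/*
 * Note: dirty-page tracking piggybacks on the write protection above.
 * Slots flagged HVF_SLOT_LOG are mapped read-only, so the guest's first
 * write to a page faults into ept_emulation_fault(), which marks the page
 * dirty and restores write access with hv_vm_protect(); subsequent writes
 * to the same page then proceed without further exits.
 */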
void hvf_arch_vcpu_destroy(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    g_free(env->hvf_mmio_buf);
}

static void init_tsc_freq(CPUX86State *env)
{
    size_t length;
    uint64_t tsc_freq;

    if (env->tsc_khz != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("machdep.tsc.frequency", &tsc_freq, &length, NULL, 0)) {
        return;
    }
    env->tsc_khz = tsc_freq / 1000;  /* Hz to KHz */
}

static void init_apic_bus_freq(CPUX86State *env)
{
    size_t length;
    uint64_t bus_freq;

    if (env->apic_bus_freq != 0) {
        return;
    }

    length = sizeof(uint64_t);
    if (sysctlbyname("hw.busfrequency", &bus_freq, &length, NULL, 0)) {
        return;
    }
    env->apic_bus_freq = bus_freq;
}
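
/*
 * Note: sysctlbyname() returns non-zero on failure, so on hosts that don't
 * expose these sysctls the frequencies stay 0 ("unknown") and
 * hvf_arch_init_vcpu() reports that vmware-cpuid-freq cannot be enabled.
 */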
static inline bool tsc_is_known(CPUX86State *env)
{
    return env->tsc_khz != 0;
}

static inline bool apic_bus_freq_is_known(CPUX86State *env)
{
    return env->apic_bus_freq != 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpu_interrupt(&cpu->accel->fd, 1);
}
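
/*
 * Note: the kick needs both calls. cpus_kick_thread() signals the vCPU
 * thread in case it is executing QEMU code, while hv_vcpu_interrupt()
 * (which takes an array of vCPU handles and a count, here a single entry)
 * forces an exit if the vCPU is currently running guest code inside
 * Hypervisor.framework.
 */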
int hvf_arch_init(void)
{
    return 0;
}

hv_return_t hvf_arch_vm_create(MachineState *ms, uint32_t pa_range)
{
    return hv_vm_create(HV_VM_DEFAULT);
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    X86CPU *x86cpu = X86_CPU(cpu);
    CPUX86State *env = &x86cpu->env;
    Error *local_err = NULL;
    int r;
    uint64_t reqCap;

    init_emu();
    init_decoder();

    hvf_state->hvf_caps = g_new0(struct hvf_vcpu_caps, 1);
    env->hvf_mmio_buf = g_new(char, 4096);

    if (x86cpu->vmware_cpuid_freq) {
        init_tsc_freq(env);
        init_apic_bus_freq(env);

        if (!tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
            error_report("vmware-cpuid-freq: feature couldn't be enabled");
        }
    }

    if ((env->features[FEAT_8000_0007_EDX] & CPUID_APM_INVTSC) &&
        invtsc_mig_blocker == NULL) {
        error_setg(&invtsc_mig_blocker,
                   "State blocked by non-migratable CPU device (invtsc flag)");
        r = migrate_add_blocker(&invtsc_mig_blocker, &local_err);
        if (r < 0) {
            error_report_err(local_err);
            return r;
        }
    }

    if (hv_vmx_read_capability(HV_VMX_CAP_PINBASED,
                               &hvf_state->hvf_caps->vmx_cap_pinbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED,
                               &hvf_state->hvf_caps->vmx_cap_procbased)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_PROCBASED2,
                               &hvf_state->hvf_caps->vmx_cap_procbased2)) {
        abort();
    }
    if (hv_vmx_read_capability(HV_VMX_CAP_ENTRY,
                               &hvf_state->hvf_caps->vmx_cap_entry)) {
        abort();
    }
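
    /*
     * Note: each capability word read above encodes which VMX control bits
     * the host allows to be clear or set; cap2ctrl() (defined in vmx.h)
     * folds the control values requested below into those allowed-0/
     * allowed-1 settings before they are written to the VMCS.
     */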
    /* set VMCS control fields */
    wvmcs(cpu->accel->fd, VMCS_PIN_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_pinbased,
                   VMCS_PIN_BASED_CTLS_EXTINT |
                   VMCS_PIN_BASED_CTLS_NMI |
                   VMCS_PIN_BASED_CTLS_VNMI));
    wvmcs(cpu->accel->fd, VMCS_PRI_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased,
                   VMCS_PRI_PROC_BASED_CTLS_HLT |
                   VMCS_PRI_PROC_BASED_CTLS_MWAIT |
                   VMCS_PRI_PROC_BASED_CTLS_TSC_OFFSET |
                   VMCS_PRI_PROC_BASED_CTLS_TPR_SHADOW) |
          VMCS_PRI_PROC_BASED_CTLS_SEC_CONTROL);

    reqCap = VMCS_PRI_PROC_BASED2_CTLS_APIC_ACCESSES;

    /* Is RDTSCP support in CPUID?  If so, enable it in the VMCS. */
    if (hvf_get_supported_cpuid(0x80000001, 0, R_EDX) & CPUID_EXT2_RDTSCP) {
        reqCap |= VMCS_PRI_PROC_BASED2_CTLS_RDTSCP;
    }

    wvmcs(cpu->accel->fd, VMCS_SEC_PROC_BASED_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_procbased2, reqCap));

    wvmcs(cpu->accel->fd, VMCS_ENTRY_CTLS,
          cap2ctrl(hvf_state->hvf_caps->vmx_cap_entry, 0));
    wvmcs(cpu->accel->fd, VMCS_EXCEPTION_BITMAP, 0); /* Double fault */

    wvmcs(cpu->accel->fd, VMCS_TPR_THRESHOLD, 0);

    x86cpu = X86_CPU(cpu);
    x86cpu->env.xsave_buf_len = 4096;
    x86cpu->env.xsave_buf = qemu_memalign(4096, x86cpu->env.xsave_buf_len);

    /*
     * The allocated storage must be large enough for all of the
     * possible XSAVE state components.
     */
    assert(hvf_get_supported_cpuid(0xd, 0, R_ECX) <= x86cpu->env.xsave_buf_len);

    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_STAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_LSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_CSTAR, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FMASK, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_FSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_GSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_KERNELGSBASE, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_TSC_AUX, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_TSC, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_CS, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_EIP, 1);
    hv_vcpu_enable_native_msr(cpu->accel->fd, MSR_IA32_SYSENTER_ESP, 1);

    return 0;
}
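
/*
 * Note: hv_vcpu_enable_native_msr() lets guest reads and writes of the
 * listed MSRs go straight to the hardware MSRs for this vCPU instead of
 * faulting out to QEMU; anything not enabled here is handled by
 * simulate_rdmsr()/simulate_wrmsr() on EXIT_REASON_RDMSR/EXIT_REASON_WRMSR
 * exits in hvf_vcpu_exec() below.
 */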
static void hvf_store_events(CPUState *cpu, uint32_t ins_len, uint64_t idtvec_info)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->exception_nr = -1;
    env->exception_pending = 0;
    env->exception_injected = 0;
    env->interrupt_injected = -1;
    env->nmi_injected = false;
    env->ins_len = 0;
    env->has_error_code = false;
    if (idtvec_info & VMCS_IDT_VEC_VALID) {
        switch (idtvec_info & VMCS_IDT_VEC_TYPE) {
        case VMCS_IDT_VEC_HWINTR:
        case VMCS_IDT_VEC_SWINTR:
            env->interrupt_injected = idtvec_info & VMCS_IDT_VEC_VECNUM;
            break;
        case VMCS_IDT_VEC_NMI:
            env->nmi_injected = true;
            break;
        case VMCS_IDT_VEC_HWEXCEPTION:
        case VMCS_IDT_VEC_SWEXCEPTION:
            env->exception_nr = idtvec_info & VMCS_IDT_VEC_VECNUM;
            env->exception_injected = 1;
            break;
        case VMCS_IDT_VEC_PRIV_SWEXCEPTION:
        default:
            abort();
        }
        if ((idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWEXCEPTION ||
            (idtvec_info & VMCS_IDT_VEC_TYPE) == VMCS_IDT_VEC_SWINTR) {
            env->ins_len = ins_len;
        }
        if (idtvec_info & VMCS_IDT_VEC_ERRCODE_VALID) {
            env->has_error_code = true;
            env->error_code = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_ERROR);
        }
    }
    if ((rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        VMCS_INTERRUPTIBILITY_NMI_BLOCKING)) {
        env->hflags2 |= HF2_NMI_MASK;
    } else {
        env->hflags2 &= ~HF2_NMI_MASK;
    }
    if (rvmcs(cpu->accel->fd, VMCS_GUEST_INTERRUPTIBILITY) &
        (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
         VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    } else {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    }
}
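
/*
 * Note: hvf_store_events() snapshots any event that was in flight when the
 * exit happened (from the IDT-vectoring fields) plus the guest's NMI and
 * IRQ-inhibit interruptibility state, so that the event can be re-injected
 * on the next VM entry.
 */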
static void hvf_cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                              uint32_t *eax, uint32_t *ebx,
                              uint32_t *ecx, uint32_t *edx)
{
    /*
     * A wrapper that extends cpu_x86_cpuid() with the 0x40000000 and
     * 0x40000010 leaves; leaves 0x40000001-0x4000000F are filled with
     * zeros. Provides vmware-cpuid-freq support to hvf.
     *
     * Note: leaf 0x40000000 does not expose HVF, leaving the hypervisor
     * signature empty.
     */

    if (index < 0x40000000 || index > 0x40000010 ||
        !tsc_is_known(env) || !apic_bus_freq_is_known(env)) {
        cpu_x86_cpuid(env, index, count, eax, ebx, ecx, edx);
        return;
    }

    switch (index) {
    case 0x40000000:
        *eax = 0x40000010;    /* Max available cpuid leaf */
        *ebx = 0;             /* Leave signature empty */
        *ecx = 0;
        *edx = 0;
        break;
    case 0x40000010:
        *eax = env->tsc_khz;
        *ebx = env->apic_bus_freq / 1000; /* Hz to KHz */
        *ecx = 0;
        *edx = 0;
        break;
    default:
        *eax = 0;
        *ebx = 0;
        *ecx = 0;
        *edx = 0;
        break;
    }
}
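
/*
 * Note: leaf 0x40000010 follows the VMware "timing information" CPUID
 * convention, returning the TSC frequency in kHz in EAX and the (virtual)
 * APIC bus frequency in kHz in EBX, which is what guests probing
 * vmware-cpuid-freq expect.
 */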
int hvf_vcpu_exec(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    int ret = 0;
    uint64_t rip = 0;

    if (hvf_process_events(cpu)) {
        return EXCP_HLT;
    }

    do {
        if (cpu->accel->dirty) {
            hvf_put_registers(cpu);
            cpu->accel->dirty = false;
        }

        if (hvf_inject_interrupts(cpu)) {
            return EXCP_INTERRUPT;
        }
        vmx_update_tpr(cpu);

        bql_unlock();
        if (!cpu_is_bsp(X86_CPU(cpu)) && cpu->halted) {
            bql_lock();
            return EXCP_HLT;
        }

        hv_return_t r = hv_vcpu_run_until(cpu->accel->fd, HV_DEADLINE_FOREVER);
        assert_hvf_ok(r);

        /* handle VMEXIT */
        uint64_t exit_reason = rvmcs(cpu->accel->fd, VMCS_EXIT_REASON);
        uint64_t exit_qual = rvmcs(cpu->accel->fd, VMCS_EXIT_QUALIFICATION);
        uint32_t ins_len = (uint32_t)rvmcs(cpu->accel->fd,
                                           VMCS_EXIT_INSTRUCTION_LENGTH);

        uint64_t idtvec_info = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);

        hvf_store_events(cpu, ins_len, idtvec_info);
        rip = rreg(cpu->accel->fd, HV_X86_RIP);
        env->eflags = rreg(cpu->accel->fd, HV_X86_RFLAGS);

        bql_lock();

        update_apic_tpr(cpu);
        current_cpu = cpu;

        ret = 0;
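        /*
         * Note: the switch below maps each VMX exit reason onto QEMU's
         * emulation helpers. Leaving ret at 0 re-enters the guest on the
         * next loop iteration; setting EXCP_INTERRUPT or EXCP_HLT breaks
         * out of the do/while so the main loop can run.
         */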
        switch (exit_reason) {
        case EXIT_REASON_HLT: {
            macvm_set_rip(cpu, rip + ins_len);
            if (!((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
                (env->eflags & IF_MASK))
                && !(cpu->interrupt_request & CPU_INTERRUPT_NMI) &&
                !(idtvec_info & VMCS_IDT_VEC_VALID)) {
                cpu->halted = 1;
                ret = EXCP_HLT;
                break;
            }
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_MWAIT: {
            ret = EXCP_INTERRUPT;
            break;
        }
        /* Need to check if MMIO or unmapped fault */
        case EXIT_REASON_EPT_FAULT:
        {
            hvf_slot *slot;
            uint64_t gpa = rvmcs(cpu->accel->fd, VMCS_GUEST_PHYSICAL_ADDRESS);

            if (((idtvec_info & VMCS_IDT_VEC_VALID) == 0) &&
                ((exit_qual & EXIT_QUAL_NMIUDTI) != 0)) {
                vmx_set_nmi_blocking(cpu);
            }

            slot = hvf_find_overlap_slot(gpa, 1);
            /* mmio */
            if (ept_emulation_fault(slot, gpa, exit_qual)) {
                struct x86_decode decode;

                load_regs(cpu);
                decode_instruction(env, &decode);
                exec_instruction(env, &decode);
                store_regs(cpu);
                break;
            }
            break;
        }
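        /*
         * Note: for MMIO the faulting instruction is decoded and executed
         * by QEMU's built-in x86 emulator (x86_decode.c / x86_emu.c), and
         * the memory access itself then reaches the device model. Faults
         * that are not emulation candidates fall through with ret still 0
         * and simply re-enter the guest.
         */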
        case EXIT_REASON_INOUT:
        {
            uint32_t in = (exit_qual & 8) != 0;
            uint32_t size = (exit_qual & 7) + 1;
            uint32_t string = (exit_qual & 16) != 0;
            uint32_t port = exit_qual >> 16;
            /*uint32_t rep = (exit_qual & 0x20) != 0;*/

            if (!string && in) {
                uint64_t val = 0;

                load_regs(cpu);
                hvf_handle_io(env, port, &val, 0, size, 1);
                if (size == 1) {
                    AL(env) = val;
                } else if (size == 2) {
                    AX(env) = val;
                } else if (size == 4) {
                    RAX(env) = (uint32_t)val;
                } else {
                    RAX(env) = (uint64_t)val;
                }
                env->eip += ins_len;
                store_regs(cpu);
                break;
            } else if (!string && !in) {
                RAX(env) = rreg(cpu->accel->fd, HV_X86_RAX);
                hvf_handle_io(env, port, &RAX(env), 1, size, 1);
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            assert(ins_len == decode.len);
            exec_instruction(env, &decode);
            store_regs(cpu);

            break;
        }
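        /*
         * Note: non-string IN/OUT are handled on a fast path built
         * entirely from the exit qualification; string and REP variants
         * carry too little information there, so they go through full
         * instruction decode and emulation instead.
         */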
        case EXIT_REASON_CPUID: {
            uint32_t rax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t rbx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RBX);
            uint32_t rcx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t rdx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (rax == 1) {
                /* CPUID1.ecx.OSXSAVE needs to know CR4 */
                env->cr[4] = rvmcs(cpu->accel->fd, VMCS_GUEST_CR4);
            }
            hvf_cpu_x86_cpuid(env, rax, rcx, &rax, &rbx, &rcx, &rdx);

            wreg(cpu->accel->fd, HV_X86_RAX, rax);
            wreg(cpu->accel->fd, HV_X86_RBX, rbx);
            wreg(cpu->accel->fd, HV_X86_RCX, rcx);
            wreg(cpu->accel->fd, HV_X86_RDX, rdx);

            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
        case EXIT_REASON_XSETBV: {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUX86State *env = &x86_cpu->env;
            uint32_t eax = (uint32_t)rreg(cpu->accel->fd, HV_X86_RAX);
            uint32_t ecx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RCX);
            uint32_t edx = (uint32_t)rreg(cpu->accel->fd, HV_X86_RDX);

            if (ecx) {
                macvm_set_rip(cpu, rip + ins_len);
                break;
            }
            env->xcr0 = ((uint64_t)edx << 32) | eax;
            wreg(cpu->accel->fd, HV_X86_XCR0, env->xcr0 | 1);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        }
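        /*
         * Note: only XCR0 (ECX == 0) is handled; writes to other XCRs are
         * silently skipped. Bit 0 (x87 state) is architecturally required
         * to stay set in XCR0, hence the "| 1" when writing the register
         * back.
         */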
        case EXIT_REASON_INTR_WINDOW:
            vmx_clear_int_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_NMI_WINDOW:
            vmx_clear_nmi_window_exiting(cpu);
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_EXT_INTR:
            /* force exit and allow io handling */
            ret = EXCP_INTERRUPT;
            break;
        case EXIT_REASON_RDMSR:
        case EXIT_REASON_WRMSR:
        {
            load_regs(cpu);
            if (exit_reason == EXIT_REASON_RDMSR) {
                simulate_rdmsr(env);
            } else {
                simulate_wrmsr(env);
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_CR_ACCESS: {
            int cr;
            int reg;

            load_regs(cpu);
            cr = exit_qual & 15;
            reg = (exit_qual >> 8) & 15;

            switch (cr) {
            case 0x0: {
                macvm_set_cr0(cpu->accel->fd, RRX(env, reg));
                break;
            }
            case 4: {
                macvm_set_cr4(cpu->accel->fd, RRX(env, reg));
                break;
            }
            case 8: {
                X86CPU *x86_cpu = X86_CPU(cpu);
                if (exit_qual & 0x10) {
                    RRX(env, reg) = cpu_get_apic_tpr(x86_cpu->apic_state);
                } else {
                    int tpr = RRX(env, reg);
                    cpu_set_apic_tpr(x86_cpu->apic_state, tpr);
                    ret = EXCP_INTERRUPT;
                }
                break;
            }
            default:
                error_report("Unrecognized CR %d", cr);
                abort();
            }
            env->eip += ins_len;
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_APIC_ACCESS: { /* TODO */
            struct x86_decode decode;

            load_regs(cpu);
            decode_instruction(env, &decode);
            exec_instruction(env, &decode);
            store_regs(cpu);
            break;
        }
        case EXIT_REASON_TPR: {
            ret = 1;
            break;
        }
        case EXIT_REASON_TASK_SWITCH: {
            uint64_t vinfo = rvmcs(cpu->accel->fd, VMCS_IDT_VECTORING_INFO);
            x68_segment_selector sel = {.sel = exit_qual & 0xffff};
            vmx_handle_task_switch(cpu, sel, (exit_qual >> 30) & 0x3,
                                   vinfo & VMCS_INTR_VALID,
                                   vinfo & VECTORING_INFO_VECTOR_MASK,
                                   vinfo & VMCS_INTR_T_MASK);
            break;
        }
        case EXIT_REASON_TRIPLE_FAULT: {
            qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
            ret = EXCP_INTERRUPT;
            break;
        }
        case EXIT_REASON_RDPMC:
            wreg(cpu->accel->fd, HV_X86_RAX, 0);
            wreg(cpu->accel->fd, HV_X86_RDX, 0);
            macvm_set_rip(cpu, rip + ins_len);
            break;
        case VMX_REASON_VMCALL:
            env->exception_nr = EXCP0D_GPF;
            env->exception_injected = 1;
            env->has_error_code = true;
            env->error_code = 0;
            break;
        default:
            error_report("%llx: unhandled exit %llx", rip, exit_reason);
        }
    } while (ret == 0);

    return ret;
}
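
/*
 * Note: guest debugging (gdbstub breakpoints and watchpoints) is not
 * implemented for x86 HVF, so the hooks below are stubs and
 * hvf_arch_supports_guest_debug() reports false.
 */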
int hvf_arch_insert_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_remove_sw_breakpoint(CPUState *cpu, struct hvf_sw_breakpoint *bp)
{
    return -ENOSYS;
}

int hvf_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

int hvf_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
{
    return -ENOSYS;
}

void hvf_arch_remove_all_hw_breakpoints(void)
{
}

void hvf_arch_update_guest_debug(CPUState *cpu)
{
}

bool hvf_arch_supports_guest_debug(void)
{
    return false;
}