/*
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"

#include "qemu-common.h"
#include "x86hvf.h"
#include "vmx.h"
#include "vmcs.h"
#include "cpu.h"
#include "x86_descr.h"
#include "x86_decode.h"

#include "hw/i386/apic_internal.h"

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
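
/*
 * State synchronisation between QEMU's CPUX86State and the
 * Hypervisor.framework (HVF) vcpu: the hvf_put_* functions push QEMU
 * state into the vcpu/VMCS, and the hvf_get_* functions read it back.
 *
 * Segment registers are converted to and from the VMX access-rights (AR)
 * encoding used by the VMCS guest segment fields:
 *
 *   bits  0-3   segment type
 *   bit   4     S (code/data vs. system descriptor)
 *   bits  5-6   DPL
 *   bit   7     P (present)
 *   bit  12     AVL
 *   bit  13     L (64-bit code segment)
 *   bit  14     D/B (default operation size)
 *   bit  15     G (granularity)
 *   bit  16     segment unusable
 *
 * hvf_set_segment() packs QEMU's SegmentCache flags into this layout;
 * hvf_get_segment() performs the inverse conversion.
 */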
void hvf_set_segment(struct CPUState *cpu, struct vmx_segment *vmx_seg,
                     SegmentCache *qseg, bool is_tr)
{
    vmx_seg->sel = qseg->selector;
    vmx_seg->base = qseg->base;
    vmx_seg->limit = qseg->limit;

    if (!qseg->selector && !x86_is_real(cpu) && !is_tr) {
        /* the TR register is usable after processor reset despite
         * having a null selector */
        vmx_seg->ar = 1 << 16; /* mark the segment as unusable */
        return;
    }
    vmx_seg->ar = (qseg->flags >> DESC_TYPE_SHIFT) & 0xf;
    vmx_seg->ar |= ((qseg->flags >> DESC_G_SHIFT) & 1) << 15;
    vmx_seg->ar |= ((qseg->flags >> DESC_B_SHIFT) & 1) << 14;
    vmx_seg->ar |= ((qseg->flags >> DESC_L_SHIFT) & 1) << 13;
    vmx_seg->ar |= ((qseg->flags >> DESC_AVL_SHIFT) & 1) << 12;
    vmx_seg->ar |= ((qseg->flags >> DESC_P_SHIFT) & 1) << 7;
    vmx_seg->ar |= ((qseg->flags >> DESC_DPL_SHIFT) & 3) << 5;
    vmx_seg->ar |= ((qseg->flags >> DESC_S_SHIFT) & 1) << 4;
}
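
/* Convert a VMX segment descriptor back into QEMU's SegmentCache format. */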
void hvf_get_segment(SegmentCache *qseg, struct vmx_segment *vmx_seg)
{
    qseg->limit = vmx_seg->limit;
    qseg->base = vmx_seg->base;
    qseg->selector = vmx_seg->sel;
    qseg->flags = ((vmx_seg->ar & 0xf) << DESC_TYPE_SHIFT) |
                  (((vmx_seg->ar >> 4) & 1) << DESC_S_SHIFT) |
                  (((vmx_seg->ar >> 5) & 3) << DESC_DPL_SHIFT) |
                  (((vmx_seg->ar >> 7) & 1) << DESC_P_SHIFT) |
                  (((vmx_seg->ar >> 12) & 1) << DESC_AVL_SHIFT) |
                  (((vmx_seg->ar >> 13) & 1) << DESC_L_SHIFT) |
                  (((vmx_seg->ar >> 14) & 1) << DESC_B_SHIFT) |
                  (((vmx_seg->ar >> 15) & 1) << DESC_G_SHIFT);
}
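
/*
 * Serialise all of QEMU's XSAVE state into the per-vcpu xsave buffer and
 * write the 4 KiB FP/SIMD state image to the HVF vcpu.
 */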
void hvf_put_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    x86_cpu_xsave_all_areas(X86_CPU(cpu_state), xsave);

    if (hv_vcpu_write_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
        abort();
    }
}
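
/*
 * Write the descriptor tables, control registers, EFER and all segment
 * registers from CPUX86State into the guest VMCS, then flush the vcpu so
 * the new state takes effect.
 */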
void hvf_put_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    struct vmx_segment seg;

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT, env->idt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE, env->idt.base);

    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT, env->gdt.limit);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE, env->gdt.base);

    /* wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR2, env->cr[2]); */
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3, env->cr[3]);
    vmx_update_tpr(cpu_state);
    wvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER, env->efer);

    macvm_set_cr4(cpu_state->hvf_fd, env->cr[4]);
    macvm_set_cr0(cpu_state->hvf_fd, env->cr[0]);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_CS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_CS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_DS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_DS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_ES], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_ES);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_SS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_SS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_FS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_FS);

    hvf_set_segment(cpu_state, &seg, &env->segs[R_GS], false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_GS);

    hvf_set_segment(cpu_state, &seg, &env->tr, true);
    vmx_write_segment_descriptor(cpu_state, &seg, R_TR);

    hvf_set_segment(cpu_state, &seg, &env->ldt, false);
    vmx_write_segment_descriptor(cpu_state, &seg, R_LDTR);

    hv_vcpu_flush(cpu_state->hvf_fd);
}
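
/*
 * Write the MSRs QEMU tracks (SYSENTER, the STAR/syscall family and the
 * FS/GS base registers) to the vcpu and synchronise the guest TSC.
 */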
void hvf_put_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS,
                      env->sysenter_cs);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP,
                      env->sysenter_esp);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP,
                      env->sysenter_eip);

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_STAR, env->star);

#ifdef TARGET_X86_64
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_CSTAR, env->cstar);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, env->kernelgsbase);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FMASK, env->fmask);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_LSTAR, env->lstar);
#endif

    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_GSBASE, env->segs[R_GS].base);
    hv_vcpu_write_msr(cpu_state->hvf_fd, MSR_FSBASE, env->segs[R_FS].base);

    /* if (!osx_is_sierra())
         wvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET, env->tsc - rdtscp());*/
    hv_vm_sync_tsc(env->tsc);
}
void hvf_get_xsave(CPUState *cpu_state)
{
    struct X86XSaveArea *xsave;

    xsave = X86_CPU(cpu_state)->env.xsave_buf;

    if (hv_vcpu_read_fpstate(cpu_state->hvf_fd, (void *)xsave, 4096)) {
        abort();
    }

    x86_cpu_xrstor_all_areas(X86_CPU(cpu_state), xsave);
}
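
/*
 * Read the segment registers, descriptor tables, control registers and
 * EFER back from the guest VMCS into CPUX86State.
 */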
void hvf_get_segments(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;

    struct vmx_segment seg;

    env->interrupt_injected = -1;

    vmx_read_segment_descriptor(cpu_state, &seg, R_CS);
    hvf_get_segment(&env->segs[R_CS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_DS);
    hvf_get_segment(&env->segs[R_DS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_ES);
    hvf_get_segment(&env->segs[R_ES], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_FS);
    hvf_get_segment(&env->segs[R_FS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_GS);
    hvf_get_segment(&env->segs[R_GS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_SS);
    hvf_get_segment(&env->segs[R_SS], &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_TR);
    hvf_get_segment(&env->tr, &seg);

    vmx_read_segment_descriptor(cpu_state, &seg, R_LDTR);
    hvf_get_segment(&env->ldt, &seg);

    env->idt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_LIMIT);
    env->idt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IDTR_BASE);
    env->gdt.limit = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_LIMIT);
    env->gdt.base = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_GDTR_BASE);

    env->cr[0] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR0);
    env->cr[2] = 0;
    env->cr[3] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR3);
    env->cr[4] = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_CR4);

    env->efer = rvmcs(cpu_state->hvf_fd, VMCS_GUEST_IA32_EFER);
}
void hvf_get_msrs(CPUState *cpu_state)
{
    CPUX86State *env = &X86_CPU(cpu_state)->env;
    uint64_t tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_CS, &tmp);
    env->sysenter_cs = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_ESP, &tmp);
    env->sysenter_esp = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_SYSENTER_EIP, &tmp);
    env->sysenter_eip = tmp;

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_STAR, &env->star);

#ifdef TARGET_X86_64
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_CSTAR, &env->cstar);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_KERNELGSBASE, &env->kernelgsbase);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_FMASK, &env->fmask);
    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_LSTAR, &env->lstar);
#endif

    hv_vcpu_read_msr(cpu_state->hvf_fd, MSR_IA32_APICBASE, &tmp);

    env->tsc = rdtscp() + rvmcs(cpu_state->hvf_fd, VMCS_TSC_OFFSET);
}
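
/*
 * Push the complete QEMU-side CPU state (general-purpose registers,
 * RIP/RFLAGS, XCR0, XSAVE state, segments, MSRs and debug registers)
 * into the HVF vcpu.
 */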
int hvf_put_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    wreg(cpu_state->hvf_fd, HV_X86_RAX, env->regs[R_EAX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBX, env->regs[R_EBX]);
    wreg(cpu_state->hvf_fd, HV_X86_RCX, env->regs[R_ECX]);
    wreg(cpu_state->hvf_fd, HV_X86_RDX, env->regs[R_EDX]);
    wreg(cpu_state->hvf_fd, HV_X86_RBP, env->regs[R_EBP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSP, env->regs[R_ESP]);
    wreg(cpu_state->hvf_fd, HV_X86_RSI, env->regs[R_ESI]);
    wreg(cpu_state->hvf_fd, HV_X86_RDI, env->regs[R_EDI]);
    wreg(cpu_state->hvf_fd, HV_X86_R8, env->regs[8]);
    wreg(cpu_state->hvf_fd, HV_X86_R9, env->regs[9]);
    wreg(cpu_state->hvf_fd, HV_X86_R10, env->regs[10]);
    wreg(cpu_state->hvf_fd, HV_X86_R11, env->regs[11]);
    wreg(cpu_state->hvf_fd, HV_X86_R12, env->regs[12]);
    wreg(cpu_state->hvf_fd, HV_X86_R13, env->regs[13]);
    wreg(cpu_state->hvf_fd, HV_X86_R14, env->regs[14]);
    wreg(cpu_state->hvf_fd, HV_X86_R15, env->regs[15]);
    wreg(cpu_state->hvf_fd, HV_X86_RFLAGS, env->eflags);
    wreg(cpu_state->hvf_fd, HV_X86_RIP, env->eip);

    wreg(cpu_state->hvf_fd, HV_X86_XCR0, env->xcr0);

    hvf_put_xsave(cpu_state);

    hvf_put_segments(cpu_state);

    hvf_put_msrs(cpu_state);

    wreg(cpu_state->hvf_fd, HV_X86_DR0, env->dr[0]);
    wreg(cpu_state->hvf_fd, HV_X86_DR1, env->dr[1]);
    wreg(cpu_state->hvf_fd, HV_X86_DR2, env->dr[2]);
    wreg(cpu_state->hvf_fd, HV_X86_DR3, env->dr[3]);
    wreg(cpu_state->hvf_fd, HV_X86_DR4, env->dr[4]);
    wreg(cpu_state->hvf_fd, HV_X86_DR5, env->dr[5]);
    wreg(cpu_state->hvf_fd, HV_X86_DR6, env->dr[6]);
    wreg(cpu_state->hvf_fd, HV_X86_DR7, env->dr[7]);

    return 0;
}
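
/*
 * Read the vcpu state back from HVF into CPUX86State and recompute the
 * cached hflags.
 */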
int hvf_get_registers(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    env->regs[R_EAX] = rreg(cpu_state->hvf_fd, HV_X86_RAX);
    env->regs[R_EBX] = rreg(cpu_state->hvf_fd, HV_X86_RBX);
    env->regs[R_ECX] = rreg(cpu_state->hvf_fd, HV_X86_RCX);
    env->regs[R_EDX] = rreg(cpu_state->hvf_fd, HV_X86_RDX);
    env->regs[R_EBP] = rreg(cpu_state->hvf_fd, HV_X86_RBP);
    env->regs[R_ESP] = rreg(cpu_state->hvf_fd, HV_X86_RSP);
    env->regs[R_ESI] = rreg(cpu_state->hvf_fd, HV_X86_RSI);
    env->regs[R_EDI] = rreg(cpu_state->hvf_fd, HV_X86_RDI);
    env->regs[8] = rreg(cpu_state->hvf_fd, HV_X86_R8);
    env->regs[9] = rreg(cpu_state->hvf_fd, HV_X86_R9);
    env->regs[10] = rreg(cpu_state->hvf_fd, HV_X86_R10);
    env->regs[11] = rreg(cpu_state->hvf_fd, HV_X86_R11);
    env->regs[12] = rreg(cpu_state->hvf_fd, HV_X86_R12);
    env->regs[13] = rreg(cpu_state->hvf_fd, HV_X86_R13);
    env->regs[14] = rreg(cpu_state->hvf_fd, HV_X86_R14);
    env->regs[15] = rreg(cpu_state->hvf_fd, HV_X86_R15);

    env->eflags = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);
    env->eip = rreg(cpu_state->hvf_fd, HV_X86_RIP);

    hvf_get_xsave(cpu_state);
    env->xcr0 = rreg(cpu_state->hvf_fd, HV_X86_XCR0);

    hvf_get_segments(cpu_state);
    hvf_get_msrs(cpu_state);

    env->dr[0] = rreg(cpu_state->hvf_fd, HV_X86_DR0);
    env->dr[1] = rreg(cpu_state->hvf_fd, HV_X86_DR1);
    env->dr[2] = rreg(cpu_state->hvf_fd, HV_X86_DR2);
    env->dr[3] = rreg(cpu_state->hvf_fd, HV_X86_DR3);
    env->dr[4] = rreg(cpu_state->hvf_fd, HV_X86_DR4);
    env->dr[5] = rreg(cpu_state->hvf_fd, HV_X86_DR5);
    env->dr[6] = rreg(cpu_state->hvf_fd, HV_X86_DR6);
    env->dr[7] = rreg(cpu_state->hvf_fd, HV_X86_DR7);

    x86_update_hflags(env);
    return 0;
}
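
/*
 * Toggle "interrupt-window exiting" in the primary processor-based VM
 * execution controls, so the guest exits as soon as it becomes able to
 * accept an external interrupt.
 */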
static void vmx_set_int_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}

void vmx_clear_int_window_exiting(CPUState *cpu)
{
    uint64_t val;

    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_INT_WINDOW_EXITING);
}
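
/*
 * Inject any pending event into the guest before VM entry: a pending
 * software interrupt, exception or NMI is encoded into
 * VMCS_ENTRY_INTR_INFO; pending external interrupts are delivered when
 * RFLAGS.IF allows it, otherwise NMI/interrupt-window exiting is
 * requested so injection can be retried on a later exit.  The return
 * value tells the caller whether an INIT or TPR access still needs to be
 * handled via hvf_process_events().
 */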
bool hvf_inject_interrupts(CPUState *cpu_state)
{
    X86CPU *x86cpu = X86_CPU(cpu_state);
    CPUX86State *env = &x86cpu->env;

    uint8_t vector;
    uint64_t intr_type;
    bool have_event = true;
    if (env->interrupt_injected != -1) {
        vector = env->interrupt_injected;
        intr_type = VMCS_INTR_T_SWINTR;
    } else if (env->exception_nr != -1) {
        vector = env->exception_nr;
        if (vector == EXCP03_INT3 || vector == EXCP04_INTO) {
            intr_type = VMCS_INTR_T_SWEXCEPTION;
        } else {
            intr_type = VMCS_INTR_T_HWEXCEPTION;
        }
    } else if (env->nmi_injected) {
        vector = NMI_VEC;
        intr_type = VMCS_INTR_T_NMI;
    } else {
        have_event = false;
    }

    uint64_t info = 0;
    if (have_event) {
        info = vector | intr_type | VMCS_INTR_VALID;
        uint64_t reason = rvmcs(cpu_state->hvf_fd, VMCS_EXIT_REASON);
        if (env->nmi_injected && reason != EXIT_REASON_TASK_SWITCH) {
            vmx_clear_nmi_blocking(cpu_state);
        }

        if (!(env->hflags2 & HF2_NMI_MASK) || intr_type != VMCS_INTR_T_NMI) {
            info &= ~(1 << 12); /* clear undefined bit */
            if (intr_type == VMCS_INTR_T_SWINTR ||
                intr_type == VMCS_INTR_T_SWEXCEPTION) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INST_LENGTH, env->ins_len);
            }

            if (env->has_error_code) {
                wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_EXCEPTION_ERROR,
                      env->error_code);
            }
            /*printf("reinject %lx err %d\n", info, err);*/
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        }
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_NMI) {
        if (!(env->hflags2 & HF2_NMI_MASK) && !(info & VMCS_INTR_VALID)) {
            cpu_state->interrupt_request &= ~CPU_INTERRUPT_NMI;
            info = VMCS_INTR_VALID | VMCS_INTR_T_NMI | NMI_VEC;
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, info);
        } else {
            vmx_set_nmi_window_exiting(cpu_state);
        }
    }

    if (!(env->hflags & HF_INHIBIT_IRQ_MASK) &&
        (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK) && !(info & VMCS_INTR_VALID)) {
        int line = cpu_get_pic_interrupt(&x86cpu->env);
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_HARD;
        if (line >= 0) {
            wvmcs(cpu_state->hvf_fd, VMCS_ENTRY_INTR_INFO, line |
                  VMCS_INTR_VALID | VMCS_INTR_T_HWINTR);
        }
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_HARD) {
        vmx_set_int_window_exiting(cpu_state);
    }
    return (cpu_state->interrupt_request
            & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR));
}
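
/*
 * Handle pending CPU events that are not delivered by event injection:
 * INIT, SIPI, APIC polling and TPR access reporting, plus waking a halted
 * vcpu when an interrupt or NMI is pending.  Returns the (possibly
 * updated) halted state.
 */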
int hvf_process_events(CPUState *cpu_state)
{
    X86CPU *cpu = X86_CPU(cpu_state);
    CPUX86State *env = &cpu->env;

    EFLAGS(env) = rreg(cpu_state->hvf_fd, HV_X86_RFLAGS);

    if (cpu_state->interrupt_request & CPU_INTERRUPT_INIT) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_init(cpu);
    }

    if (cpu_state->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
    }
    if (((cpu_state->interrupt_request & CPU_INTERRUPT_HARD) &&
        (EFLAGS(env) & IF_MASK)) ||
        (cpu_state->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu_state->halted = 0;
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_SIPI) {
        hvf_cpu_synchronize_state(cpu_state);
        do_cpu_sipi(cpu);
    }
    if (cpu_state->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu_state->interrupt_request &= ~CPU_INTERRUPT_TPR;
        hvf_cpu_synchronize_state(cpu_state);
        apic_handle_tpr_access_report(cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return cpu_state->halted;
}