/*
 *  x86 SVM helpers (sysemu only)
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */
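
/*
 * Store one segment register into the vmcb_seg structure at @addr,
 * repacking the QEMU SegmentCache flags into the SVM attribute format.
 */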
static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
    *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
}
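
/*
 * Load one vmcb_seg structure from @addr into a SegmentCache, expanding
 * the packed SVM attribute bits back into the QEMU flags layout and
 * canonicalizing the segment base.
 */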
static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}
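
/*
 * Check the EFER/CR0/CR4/CS combination that VMRUN is about to run with;
 * any of the illegal combinations below makes VMRUN fail with SVM_EXIT_ERR.
 */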
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                && (env->cr[4] & CR4_PAE_MASK)
                                && (env->segs[R_CS].flags & DESC_L_MASK)
                                && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                    && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}

static inline bool virtual_vm_load_save_enabled(CPUX86State *env,
                                                uint32_t exit_code,
                                                uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                  control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}
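
/*
 * VMRUN: save the host state into the hsave area, load the guest state
 * and intercept configuration from the VMCB pointed to by rAX, and
 * optionally inject the event described by control.event_inj.
 */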
void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint64_t event_inj;
    uint64_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
             env->eip + next_eip_addend);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                      control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                       offsetof(struct vmcb,
                                                control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                        offsetof(struct vmcb,
                                                 control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb,
                                                  control.intercept_exceptions
                                                  ));

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.asid));
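
    /*
     * Sanity checks before entering the guest: the MSR and I/O permission
     * maps must lie below the guest-physical address limit, VMRUN itself
     * must be intercepted, and ASID 0 is reserved for the host.
     */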
    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                    offsetof(struct vmcb,
                                            control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
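
    /* Set up nested paging (NPT) if the VMCB requests it. */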
    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb,
                                                        control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                               offsetof(struct vmcb, control.tsc_offset));
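
    /* Validate the control registers the guest is about to run with. */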
    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
            (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                       env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }
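
    /* Load the remaining guest state from the VMCB. */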
    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                           env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_vmcb + offsetof(struct vmcb,
                                                          save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                        env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (env->dr[6] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (env->dr[7] & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                      env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        CPUState *cs = env_cpu(env);

        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                 control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}
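
/*
 * VMMCALL is only handled here for its intercept; without an intercept it
 * simply raises #UD.
 */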
void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}
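
/*
 * VMLOAD: load the additional guest state (FS, GS, TR, LDTR, the
 * SYSCALL/SYSENTER MSRs and KernelGSBase) from the VMCB at rAX.
 */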
void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}
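
/*
 * VMSAVE: the mirror image of VMLOAD, storing the same set of registers
 * into the VMCB at rAX.
 */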
void helper_vmsave(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMSAVE, 0, GETPC());

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMSAVE, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }

    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(env, mmu_idx, addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.kernel_gs_base),
                      env->kernelgsbase, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                      env->lstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                      env->cstar, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                      env->fmask, mmu_idx, 0);
#endif
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                      env->star, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                      env->sysenter_cs, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                      env->sysenter_esp, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                      env->sysenter_eip, mmu_idx, 0);
}
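
/*
 * STGI/CLGI set and clear the global interrupt flag.  With virtual GIF
 * enabled they only flip the guest's V_GIF bit in int_ctl.
 */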
void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}
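
/* Return true if exit @type is intercepted by the currently loaded VMCB. */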
bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}
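
/*
 * Raise a #VMEXIT if @type is intercepted.  For SVM_EXIT_MSR the MSR
 * permission map at control.msrpm_base_pa is consulted first, with @param
 * selecting which of the two per-MSR bits is tested.
 */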
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb,
                                          control.msrpm_base_pa));
        uint32_t t0, t1;

        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}
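
/*
 * Check the I/O permission map for an intercepted IN/OUT; if the port's
 * bits are set, record the next EIP and exit with SVM_EXIT_IOIO.
 */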
void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                 offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;

        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            /* next env->eip is saved */
            x86_stq_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}
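
/*
 * Record the exit code and exit_info_1 in the VMCB and raise EXCP_VMEXIT;
 * the actual switch back to the host state is performed by do_vmexit().
 */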
void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                   control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
             exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}

void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                 SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
             env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
             env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
             env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
             env->idt.limit);

    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
             cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
             env->eip);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
             env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                      save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                       save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr4)));
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                     env->vm_hsave + offsetof(struct vmcb,
                                                              save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                         save.efer)));
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                  env->vm_hsave + offsetof(struct vmcb,
                                                           save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                        env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = x86_ldq_phys(cs,
                          env->vm_hsave + offsetof(struct vmcb, save.dr7));
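
    /* Forward any pending event injection to the host as exit_int_info. */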
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                              control.event_inj_err)));
    x86_stl_phys(cs,
             env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;

    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated by the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host. */
}