/*
 * x86 SVM helpers (sysemu only)
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"

/* Secure Virtual Machine helpers */
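
/*
 * Note on the segment helpers below: QEMU keeps the descriptor attributes
 * in bits 8..15 and 20..23 of SegmentCache.flags, while the VMCB packs them
 * into a 12-bit "attrib" field; the shift-by-8/shift-by-12 pairs in
 * svm_save_seg() and svm_load_seg() convert between the two layouts.
 */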

static void svm_save_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         const SegmentCache *sc)
{
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                      sc->selector, mmu_idx, 0);
    cpu_stq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                      sc->base, mmu_idx, 0);
    cpu_stl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                      sc->limit, mmu_idx, 0);
    cpu_stw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                      ((sc->flags >> 8) & 0xff)
                      | ((sc->flags >> 12) & 0x0f00),
                      mmu_idx, 0);
}

/*
 * VMRUN and VMLOAD canonicalize (i.e., sign-extend to bit 63) all base
 * addresses in the segment registers that have been loaded.
 */
static inline void svm_canonicalization(CPUX86State *env, target_ulong *seg_base)
{
    uint16_t shift_amt = 64 - cpu_x86_virtual_addr_width(env);
    *seg_base = ((((long) *seg_base) << shift_amt) >> shift_amt);
}

static void svm_load_seg(CPUX86State *env, int mmu_idx, hwaddr addr,
                         SegmentCache *sc)
{
    unsigned int flags;

    sc->selector =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, selector),
                           mmu_idx, 0);
    sc->base =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, base),
                          mmu_idx, 0);
    sc->limit =
        cpu_ldl_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, limit),
                          mmu_idx, 0);
    flags =
        cpu_lduw_mmuidx_ra(env, addr + offsetof(struct vmcb_seg, attrib),
                           mmu_idx, 0);
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);

    svm_canonicalization(env, &sc->base);
}

static void svm_load_seg_cache(CPUX86State *env, int mmu_idx,
                               hwaddr addr, int seg_reg)
{
    SegmentCache sc;

    svm_load_seg(env, mmu_idx, addr, &sc);
    cpu_x86_load_seg_cache(env, seg_reg, sc.selector,
                           sc.base, sc.limit, sc.flags);
}
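
/*
 * EFER and control-register consistency checks that VMRUN performs before
 * entering the guest; helper_vmrun() turns any violation into a #VMEXIT
 * with SVM_EXIT_ERR.
 */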
static inline bool is_efer_invalid_state(CPUX86State *env)
{
    if (!(env->efer & MSR_EFER_SVME)) {
        return true;
    }

    if (env->efer & MSR_EFER_RESERVED) {
        return true;
    }

    if ((env->efer & (MSR_EFER_LMA | MSR_EFER_LME)) &&
            !(env->features[FEAT_8000_0001_EDX] & CPUID_EXT2_LM)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                   && !(env->cr[4] & CR4_PAE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                   && !(env->cr[0] & CR0_PE_MASK)) {
        return true;
    }

    if ((env->efer & MSR_EFER_LME) && (env->cr[0] & CR0_PG_MASK)
                                   && (env->cr[4] & CR4_PAE_MASK)
                                   && (env->segs[R_CS].flags & DESC_L_MASK)
                                   && (env->segs[R_CS].flags & DESC_B_MASK)) {
        return true;
    }

    return false;
}

static inline bool virtual_gif_enabled(CPUX86State *env)
{
    if (likely(env->hflags & HF_GUEST_MASK)) {
        return (env->features[FEAT_SVM] & CPUID_SVM_VGIF)
                && (env->int_ctl & V_GIF_ENABLED_MASK);
    }
    return false;
}
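
/*
 * Virtual VMLOAD/VMSAVE requires nested paging and long mode.  While running
 * a guest, this helper forces a #VMEXIT with the given exit code when those
 * prerequisites are missing; otherwise it reports whether the VMCB actually
 * enables the feature.
 */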
static inline bool virtual_vm_load_save_enabled(CPUX86State *env, uint32_t exit_code, uintptr_t retaddr)
{
    uint64_t lbr_ctl;

    if (likely(env->hflags & HF_GUEST_MASK)) {
        if (likely(!(env->hflags2 & HF2_NPT_MASK)) || !(env->efer & MSR_EFER_LMA)) {
            cpu_vmexit(env, exit_code, 0, retaddr);
        }

        lbr_ctl = x86_ldl_phys(env_cpu(env), env->vm_vmcb + offsetof(struct vmcb,
                                                                     control.lbr_ctl));
        return (env->features[FEAT_SVM] & CPUID_SVM_V_VMSAVE_VMLOAD)
                && (lbr_ctl & V_VMLOAD_VMSAVE_ENABLED_MASK);
    }

    return false;
}

static inline bool virtual_gif_set(CPUX86State *env)
{
    return !virtual_gif_enabled(env) || (env->int_ctl & V_GIF_MASK);
}

void helper_vmrun(CPUX86State *env, int aflag, int next_eip_addend)
{
    CPUState *cs = env_cpu(env);
    X86CPU *cpu = env_archcpu(env);
    target_ulong addr;
    uint64_t nested_ctl;
    uint32_t event_inj;
    uint32_t asid;
    uint64_t new_cr0;
    uint64_t new_cr3;
    uint64_t new_cr4;
    uint64_t new_dr6;
    uint64_t new_dr7;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMRUN, 0, GETPC());

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));

    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_hsave + offsetof(struct vmcb, save.rip),
                 env->eip + next_eip_addend);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_hsave + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                              control.intercept));
    env->intercept_cr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_cr_read));
    env->intercept_cr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_cr_write));
    env->intercept_dr_read = x86_lduw_phys(cs, env->vm_vmcb +
                                           offsetof(struct vmcb,
                                                    control.intercept_dr_read));
    env->intercept_dr_write = x86_lduw_phys(cs, env->vm_vmcb +
                                            offsetof(struct vmcb,
                                                     control.intercept_dr_write));
    env->intercept_exceptions = x86_ldl_phys(cs, env->vm_vmcb +
                                             offsetof(struct vmcb,
                                                      control.intercept_exceptions));

    env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    if (x86_ldl_phys(cs, env->vm_vmcb +
                     offsetof(struct vmcb, control.int_state)) &
        SVM_INTERRUPT_SHADOW_MASK) {
        env->hflags |= HF_INHIBIT_IRQ_MASK;
    }

    nested_ctl = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.nested_ctl));
    asid = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                    control.asid));

    uint64_t msrpm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.msrpm_base_pa));
    uint64_t iopm_base_pa = x86_ldq_phys(cs, env->vm_vmcb +
                                         offsetof(struct vmcb, control.iopm_base_pa));

    if ((msrpm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_MSRPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if ((iopm_base_pa & ~0xfff) >= (1ull << cpu->phys_bits) - SVM_IOPM_SIZE) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    env->nested_pg_mode = 0;

    if (!cpu_svm_has_intercept(env, SVM_EXIT_VMRUN)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (asid == 0) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    if (nested_ctl & SVM_NPT_ENABLED) {
        env->nested_cr3 = x86_ldq_phys(cs,
                                       env->vm_vmcb + offsetof(struct vmcb,
                                                               control.nested_cr3));
        env->hflags2 |= HF2_NPT_MASK;

        env->nested_pg_mode = get_pg_mode(env) & PG_MODE_SVM_MASK;

        tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);
    }

    /* enable intercepts */
    env->hflags |= HF_GUEST_MASK;

    env->tsc_offset = x86_ldq_phys(cs, env->vm_vmcb +
                                   offsetof(struct vmcb, control.tsc_offset));

    new_cr0 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr0));
    if (new_cr0 & SVM_CR0_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if ((new_cr0 & CR0_NW_MASK) && !(new_cr0 & CR0_CD_MASK)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr3 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr3));
    if ((env->efer & MSR_EFER_LMA) &&
        (new_cr3 & ((~0ULL) << cpu->phys_bits))) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    new_cr4 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cr4));
    if (new_cr4 & cr4_reserved_bits(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    /* clear exit_info_2 so we behave like the real hardware */
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, new_cr0);
    cpu_x86_update_cr4(env, new_cr4);
    cpu_x86_update_cr3(env, new_cr3);
    env->cr[2] = x86_ldq_phys(cs,
                              env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    env->int_ctl = x86_ldl_phys(cs,
                                env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (env->int_ctl & V_INTR_MASKING_MASK) {
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK) {
            env->hflags2 |= HF2_HIF_MASK;
        }
    }

    cpu_load_efer(env,
                  x86_ldq_phys(cs,
                               env->vm_vmcb + offsetof(struct vmcb, save.efer)));

    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_vmcb + offsetof(struct vmcb,
                                                              save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_vmcb + offsetof(struct vmcb, save.ds), R_DS);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.idtr), &env->idt);
    svm_load_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.gdtr), &env->gdt);

    env->eip = x86_ldq_phys(cs,
                            env->vm_vmcb + offsetof(struct vmcb, save.rip));

    env->regs[R_ESP] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs,
                                    env->vm_vmcb + offsetof(struct vmcb, save.rax));

    new_dr7 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    new_dr6 = x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.dr6));

#ifdef TARGET_X86_64
    if (new_dr7 & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
    if (new_dr6 & DR_RESERVED_MASK) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }
#endif

    cpu_x86_update_dr7(env, new_dr7);
    env->dr[6] = new_dr6;

    if (is_efer_invalid_state(env)) {
        cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
    }

    switch (x86_ldub_phys(cs,
                          env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(cs);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (ctl_has_irq(env)) {
        cs->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    if (virtual_gif_set(env)) {
        env->hflags2 |= HF2_VGIF_MASK;
    }

    /* maybe we need to inject an event */
    event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                         control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = x86_ldl_phys(cs, env->vm_vmcb +
                                              offsetof(struct vmcb,
                                                       control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct? */
            do_interrupt_x86_hardirq(env, vector, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            cs->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            if (vector == EXCP02_NMI || vector >= 31) {
                cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            }
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(cs);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            cs->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = env->eip;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(cs);
            break;
        default:
            cpu_vmexit(env, SVM_EXIT_ERR, 0, GETPC());
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", cs->exception_index,
                      env->error_code);
    }
}

void helper_vmmcall(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_VMMCALL, 0, GETPC());
    raise_exception(env, EXCP06_ILLOP);
}

void helper_vmload(CPUX86State *env, int aflag)
{
    int mmu_idx = MMU_PHYS_IDX;
    target_ulong addr;

    if (aflag == 2) {
        addr = env->regs[R_EAX];
    } else {
        addr = (uint32_t)env->regs[R_EAX];
    }

    /* Exceptions are checked before the intercept. */
    if (addr & (0xfff | ((~0ULL) << env_archcpu(env)->phys_bits))) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }

    cpu_svm_check_intercept_param(env, SVM_EXIT_VMLOAD, 0, GETPC());

    if (virtual_vm_load_save_enabled(env, SVM_EXIT_VMLOAD, GETPC())) {
        mmu_idx = MMU_NESTED_IDX;
    }
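
    /*
     * With virtual VMLOAD/VMSAVE the VMCB is read through the nested page
     * tables (MMU_NESTED_IDX, i.e. a guest-physical address); otherwise it
     * is accessed as a system-physical address (MMU_PHYS_IDX).
     */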
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.fs), R_FS);
    svm_load_seg_cache(env, mmu_idx,
                       addr + offsetof(struct vmcb, save.gs), R_GS);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.tr), &env->tr);
    svm_load_seg(env, mmu_idx,
                 addr + offsetof(struct vmcb, save.ldtr), &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase =
        cpu_ldq_mmuidx_ra(env,
                          addr + offsetof(struct vmcb, save.kernel_gs_base),
                          mmu_idx, 0);
    env->lstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.lstar),
                          mmu_idx, 0);
    env->cstar =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.cstar),
                          mmu_idx, 0);
    env->fmask =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sfmask),
                          mmu_idx, 0);
    svm_canonicalization(env, &env->kernelgsbase);
#endif
    env->star =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.star),
                          mmu_idx, 0);
    env->sysenter_cs =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_cs),
                          mmu_idx, 0);
    env->sysenter_esp =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_esp),
                          mmu_idx, 0);
    env->sysenter_eip =
        cpu_ldq_mmuidx_ra(env, addr + offsetof(struct vmcb, save.sysenter_eip),
                          mmu_idx, 0);
}
*env
, int aflag
)
540 int mmu_idx
= MMU_PHYS_IDX
;
544 addr
= env
->regs
[R_EAX
];
546 addr
= (uint32_t)env
->regs
[R_EAX
];
549 /* Exceptions are checked before the intercept. */
550 if (addr
& (0xfff | ((~0ULL) << env_archcpu(env
)->phys_bits
))) {
551 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
554 cpu_svm_check_intercept_param(env
, SVM_EXIT_VMSAVE
, 0, GETPC());
556 if (virtual_vm_load_save_enabled(env
, SVM_EXIT_VMSAVE
, GETPC())) {
557 mmu_idx
= MMU_NESTED_IDX
;
560 svm_save_seg(env
, mmu_idx
, addr
+ offsetof(struct vmcb
, save
.fs
),
562 svm_save_seg(env
, mmu_idx
, addr
+ offsetof(struct vmcb
, save
.gs
),
564 svm_save_seg(env
, mmu_idx
, addr
+ offsetof(struct vmcb
, save
.tr
),
566 svm_save_seg(env
, mmu_idx
, addr
+ offsetof(struct vmcb
, save
.ldtr
),
570 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.kernel_gs_base
),
571 env
->kernelgsbase
, mmu_idx
, 0);
572 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.lstar
),
573 env
->lstar
, mmu_idx
, 0);
574 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.cstar
),
575 env
->cstar
, mmu_idx
, 0);
576 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.sfmask
),
577 env
->fmask
, mmu_idx
, 0);
579 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.star
),
580 env
->star
, mmu_idx
, 0);
581 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.sysenter_cs
),
582 env
->sysenter_cs
, mmu_idx
, 0);
583 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.sysenter_esp
),
584 env
->sysenter_esp
, mmu_idx
, 0);
585 cpu_stq_mmuidx_ra(env
, addr
+ offsetof(struct vmcb
, save
.sysenter_eip
),
586 env
->sysenter_eip
, mmu_idx
, 0);

void helper_stgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_STGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl |= V_GIF_MASK;
        env->hflags2 |= HF2_VGIF_MASK;
    } else {
        env->hflags2 |= HF2_GIF_MASK;
    }
}

void helper_clgi(CPUX86State *env)
{
    cpu_svm_check_intercept_param(env, SVM_EXIT_CLGI, 0, GETPC());

    if (virtual_gif_enabled(env)) {
        env->int_ctl &= ~V_GIF_MASK;
        env->hflags2 &= ~HF2_VGIF_MASK;
    } else {
        env->hflags2 &= ~HF2_GIF_MASK;
    }
}

bool cpu_svm_has_intercept(CPUX86State *env, uint32_t type)
{
    switch (type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            return true;
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            return true;
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            return true;
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            return true;
        }
        break;
    }
    return false;
}
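
/*
 * cpu_svm_check_intercept_param() is a no-op unless we are currently running
 * a guest; for SVM_EXIT_MSR it additionally consults the MSR permission
 * bitmap before deciding to exit.
 */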
void cpu_svm_check_intercept_param(CPUX86State *env, uint32_t type,
                                   uint64_t param, uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    if (likely(!(env->hflags & HF_GUEST_MASK))) {
        return;
    }

    if (!cpu_svm_has_intercept(env, type)) {
        return;
    }

    if (type == SVM_EXIT_MSR) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb,
                                              control.msrpm_base_pa));
        uint32_t t0, t1;
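
        /*
         * The MSR permission bitmap uses two bits per MSR and is split into
         * 2 KB regions for the 0x0000_0000, 0xc000_0000 and 0xc001_0000 MSR
         * ranges; t1 is the byte offset into the bitmap and t0 the bit
         * offset within that byte.
         */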
        switch ((uint32_t)env->regs[R_ECX]) {
        case 0 ... 0x1fff:
            t0 = (env->regs[R_ECX] * 2) % 8;
            t1 = (env->regs[R_ECX] * 2) / 8;
            break;
        case 0xc0000000 ... 0xc0001fff:
            t0 = (8192 + env->regs[R_ECX] - 0xc0000000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        case 0xc0010000 ... 0xc0011fff:
            t0 = (16384 + env->regs[R_ECX] - 0xc0010000) * 2;
            t1 = (t0 / 8);
            t0 %= 8;
            break;
        default:
            cpu_vmexit(env, type, param, retaddr);
            t0 = 0;
            t1 = 0;
            break;
        }
        if (x86_ldub_phys(cs, addr + t1) & ((1 << param) << t0)) {
            cpu_vmexit(env, type, param, retaddr);
        }
        return;
    }

    cpu_vmexit(env, type, param, retaddr);
}

void helper_svm_check_intercept(CPUX86State *env, uint32_t type)
{
    cpu_svm_check_intercept_param(env, type, 0, GETPC());
}

void helper_svm_check_io(CPUX86State *env, uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    CPUState *cs = env_cpu(env);

    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = x86_ldq_phys(cs, env->vm_vmcb +
                                     offsetof(struct vmcb, control.iopm_base_pa));
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
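
        /*
         * The I/O permission bitmap has one bit per port; "mask" covers the
         * size of the access so that a multi-byte access overlapping an
         * intercepted port is caught as well.
         */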
        if (x86_lduw_phys(cs, addr + port / 8) & (mask << (port & 7))) {
            x86_stq_phys(cs,
                         env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                         env->eip + next_eip_addend);
            cpu_vmexit(env, SVM_EXIT_IOIO, param | (port << 16), GETPC());
        }
    }
}

void cpu_vmexit(CPUX86State *env, uint32_t exit_code, uint64_t exit_info_1,
                uintptr_t retaddr)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, retaddr);

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016"
                  PRIx64 ", " TARGET_FMT_lx ")!\n",
                  exit_code, exit_info_1,
                  x86_ldq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                           control.exit_info_2)),
                  env->eip);

    cs->exception_index = EXCP_VMEXIT;
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, control.exit_code),
                 exit_code);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.exit_info_1), exit_info_1);

    /* remove any pending exception */
    env->old_exception = -1;
    cpu_loop_exit(cs);
}
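
/*
 * cpu_vmexit() above only records the exit code and leaves the CPU loop;
 * the actual switch back to the host state is performed by do_vmexit()
 * below, once the pending EXCP_VMEXIT is handled outside of translated code.
 */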
void do_vmexit(CPUX86State *env)
{
    CPUState *cs = env_cpu(env);

    if (env->hflags & HF_INHIBIT_IRQ_MASK) {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state),
                     SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }
    env->hflags2 &= ~HF2_NPT_MASK;
    tlb_flush_by_mmuidx(cs, 1 << MMU_NESTED_IDX);

    /* Save the VM state in the vmcb */
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env, MMU_PHYS_IDX,
                 env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base),
                 env->gdt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit),
                 env->gdt.limit);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.base),
                 env->idt.base);
    x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit),
                 env->idt.limit);

    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), env->int_ctl);

    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rflags),
                 cpu_compute_eflags(env));
    x86_stq_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.rip),
                 env->eip);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rsp), env->regs[R_ESP]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.rax), env->regs[R_EAX]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    x86_stq_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    x86_stb_phys(cs, env->vm_vmcb + offsetof(struct vmcb, save.cpl),
                 env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_GUEST_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */
    cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->int_ctl = 0;

    /* Clears the TSC_OFFSET inside the processor. */
    env->tsc_offset = 0;

    env->gdt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.gdtr.base));
    env->gdt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.gdtr.limit));

    env->idt.base = x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                              save.idtr.base));
    env->idt.limit = x86_ldl_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                               save.idtr.limit));

    cpu_x86_update_cr0(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr0)) |
                       CR0_PE_MASK);
    cpu_x86_update_cr4(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr4)));

    /*
     * Resets the current ASID register to zero (host ASID; TLB flush).
     *
     * If the host is in PAE mode, the processor reloads the host's PDPEs
     * from the page table indicated by the host's CR3. FIXME: If the PDPEs
     * contain illegal state, the processor causes a shutdown (QEMU does
     * not implement PDPTRs).
     */
    cpu_x86_update_cr3(env, x86_ldq_phys(cs,
                                         env->vm_hsave + offsetof(struct vmcb,
                                                                  save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env, x86_ldq_phys(cs, env->vm_hsave + offsetof(struct vmcb,
                                                                 save.efer)));

    /* Completion of the VMRUN instruction clears the host EFLAGS.RF bit. */
    env->eflags = 0;
    cpu_load_eflags(env, x86_ldq_phys(cs,
                                      env->vm_hsave + offsetof(struct vmcb,
                                                               save.rflags)),
                    ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK |
                      VM_MASK));

    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.es), R_ES);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.cs), R_CS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ss), R_SS);
    svm_load_seg_cache(env, MMU_PHYS_IDX,
                       env->vm_hsave + offsetof(struct vmcb, save.ds), R_DS);

    env->eip = x86_ldq_phys(cs,
                            env->vm_hsave + offsetof(struct vmcb, save.rip));
    env->regs[R_ESP] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rsp));
    env->regs[R_EAX] = x86_ldq_phys(cs, env->vm_hsave +
                                    offsetof(struct vmcb, save.rax));

    env->dr[6] = x86_ldq_phys(cs,
                              env->vm_hsave + offsetof(struct vmcb, save.dr6));

    /* Disables all breakpoints in the host DR7 register. */
    cpu_x86_update_dr7(env,
                       x86_ldq_phys(cs,
                                    env->vm_hsave + offsetof(struct vmcb, save.dr7)) & ~0xff);

    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
                 x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj_err)));
    x86_stl_phys(cs,
                 env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    env->hflags2 &= ~HF2_VGIF_MASK;

    /* FIXME: Checks the reloaded host state for consistency. */

    /*
     * EFLAGS.TF causes a #DB trap after the VMRUN completes on the host
     * side (i.e., after the #VMEXIT from the guest). Since we're running
     * in the main loop, call do_interrupt_all directly.
     */
    if ((env->eflags & TF_MASK) != 0) {
        env->dr[6] |= DR6_BS;
        do_interrupt_all(X86_CPU(cs), EXCP01_DB, 0, 0, env->eip, 0);
    }
}