/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"
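/*
 * Overview (informal): SYSCALL saves the return RIP in RCX and, in long
 * mode, the pre-syscall RFLAGS in R11 (with RF cleared). RFLAGS is then
 * masked with the FMASK MSR, CS/SS are loaded from bits 47:32 of the
 * STAR MSR (CS = selector, SS = selector + 8, with fixed flat
 * descriptors), and execution continues at LSTAR (64-bit callers) or
 * CSTAR (compatibility-mode callers). Outside long mode, the legacy
 * path clears IF/VM and jumps to the low 32 bits of STAR.
 */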
#ifdef TARGET_X86_64
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        /* SYSCALL not enabled in EFER: #UD */
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif /* TARGET_X86_64 */
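/*
 * SVM event-injection bookkeeping (informal summary): when an exception
 * or interrupt is delivered while the CPU is in guest mode, record it
 * in the VMCB EVENTINJ field so that a subsequent #VMEXIT can report
 * the event that was in flight. An EVENTINJ entry that is already
 * valid is left untouched, and real-mode delivery (rm != 0) never
 * carries an error code.
 */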
void handle_even_inj(CPUX86State *env, int intno, int is_int,
                     int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
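/*
 * Main exception/interrupt delivery entry point called from the TCG
 * execution loop. EXCP_VMEXIT is special: it is not delivered through
 * the guest IDT but instead unwinds into host (hypervisor) state.
 */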
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->exception_index == EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
}
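/*
 * x86_cpu_pending_interrupt() narrows interrupt_request down to the
 * single highest-priority source that is currently deliverable (POLL,
 * SIPI, SMI, NMI, MCE, then hard and virtual IRQs), so each call of
 * this function services at most one event.
 */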
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was changed. */
    return true;
}
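/*
 * Worked example (illustrative): for a one-byte access to port 0x3F8,
 * io_offset = iobase + (0x3F8 >> 3) = iobase + 0x7F; the 16-bit word
 * fetched there is shifted right by 0x3F8 & 7 = 0 bits and checked
 * against mask = (1 << 1) - 1 = 1. Reading two bytes guarantees the
 * checked window never straddles the fetched word, since at most
 * (addr & 7) + size <= 7 + 4 = 11 of its 16 bits are consumed.
 */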
/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
    uintptr_t retaddr = GETPC();
    uint32_t io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}