/*
 * x86 segmentation related helpers: (sysemu-only code)
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"
#include "tcg/helper-tcg.h"
#include "../seg_helper.h"
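/*
 * SYSCALL fast system-call entry.  Architecturally, the new CS selector
 * comes from STAR[47:32] and SS is loaded from CS + 8.  In long mode the
 * return RIP is saved in RCX and RFLAGS in R11, RFLAGS is masked with
 * SFMASK (env->fmask), and execution continues at LSTAR (64-bit code) or
 * CSTAR (compatibility mode); legacy mode jumps to STAR[31:0] instead.
 */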
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env) & ~RF_MASK;

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~(env->fmask | RF_MASK);
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
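/*
 * Reflect an event into the guest's VMCB EVENTINJ field (AMD APM vol. 2):
 * bits 7:0 carry the vector, bits 10:8 the event type, bit 11 flags a
 * valid error code and bit 31 marks the whole field as valid.  Nothing is
 * written if an event is already pending injection.
 */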
void handle_even_inj(CPUX86State *env, int intno, int is_int,
                     int error_code, int is_hw, int rm)
{
    CPUState *cs = env_cpu(env);
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                     control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
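/*
 * Deliver the pending exception or interrupt.  The synthetic EXCP_VMEXIT
 * exception index requests a world switch back to the SVM host instead of
 * an architectural event injection.
 */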
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (cs->exception_index == EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
}
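/*
 * Called while the CPU is halted.  Service any pending APIC poll request
 * under the BQL, then report whether there is work that ends the halt;
 * a pending single-step trap (TF set) must still be raised when the HLT
 * instruction completes.
 */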
bool x86_cpu_exec_halt(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        bql_lock();
        apic_poll_irq(x86_cpu->apic_state);
        cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
        bql_unlock();
    }

    if (!cpu_has_work(cpu)) {
        return false;
    }

    /* Complete HLT instruction.  */
    if (env->eflags & TF_MASK) {
        env->dr[6] |= DR6_BS;
        do_interrupt_all(x86_cpu, EXCP01_DB, 0, 0, env->eip, 0);
    }
    return true;
}
bool x86_need_replay_interrupt(int interrupt_request)
{
    /*
     * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
     * "real" interrupt event later.  It does not need to be recorded for
     * replay purposes.
     */
    return !(interrupt_request & CPU_INTERRUPT_POLL);
}
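/*
 * Acknowledge and inject exactly one pending interrupt source per call;
 * the relative priority (e.g. SMI before NMI before maskable interrupts)
 * is resolved by x86_cpu_pending_interrupt().
 */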
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    int intno;

    interrupt_request = x86_cpu_pending_interrupt(cs, interrupt_request);
    if (!interrupt_request) {
        return false;
    }

    /* Don't process multiple interrupt requests in a single call.
     * This is required to make icount-driven execution deterministic.
     */
    switch (interrupt_request) {
    case CPU_INTERRUPT_POLL:
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        break;
    case CPU_INTERRUPT_SIPI:
        do_cpu_sipi(cpu);
        break;
    case CPU_INTERRUPT_SMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
        do_smm_enter(cpu);
        break;
    case CPU_INTERRUPT_NMI:
        cpu_svm_check_intercept_param(env, SVM_EXIT_NMI, 0, 0);
        cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
        env->hflags2 |= HF2_NMI_MASK;
        do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
        break;
    case CPU_INTERRUPT_MCE:
        cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
        do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
        break;
    case CPU_INTERRUPT_HARD:
        cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
        cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                   CPU_INTERRUPT_VIRQ);
        intno = cpu_get_pic_interrupt(env);
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        break;
    case CPU_INTERRUPT_VIRQ:
        cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
        intno = x86_ldl_phys(cs, env->vm_vmcb
                             + offsetof(struct vmcb, control.int_vector));
        qemu_log_mask(CPU_LOG_INT,
                      "Servicing virtual hardware INT=0x%02x\n", intno);
        do_interrupt_x86_hardirq(env, intno, 1);
        cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
        env->int_ctl &= ~V_IRQ_MASK;
        break;
    }

    /* Ensure that no TB jump will be modified as the program flow was changed.  */
    return true;
}
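/*
 * TSS I/O permission bitmap: the 16-bit field at offset 0x66 of the TSS
 * holds the bitmap's offset from the TSS base, and each bit covers one
 * port (port N is bit N % 8 of byte N / 8).  For example, a one-byte
 * access to port 0x3F8 tests bit 0 of byte 0x7F of the bitmap.  Two bytes
 * are read so that a multi-byte access crossing a byte boundary is still
 * checked in full.
 */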
/* check if Port I/O is allowed in TSS */
void helper_check_io(CPUX86State *env, uint32_t addr, uint32_t size)
{
    uintptr_t retaddr = GETPC();
    uint32_t io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}