/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
//#define DEBUG_PCALL

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(cpu)                                  \
    log_cpu_state_mask(CPU_LOG_PCALL, (cpu), CPU_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(cpu) do { } while (0)
#endif
#ifdef CONFIG_USER_ONLY
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_useronly_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_useronly_template.h"
#undef MEMSUFFIX
#else
#define CPU_MMU_INDEX (cpu_mmu_index_kernel(env))
#define MEMSUFFIX _kernel
#define DATA_SIZE 1
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 2
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 4
#include "exec/cpu_ldst_template.h"

#define DATA_SIZE 8
#include "exec/cpu_ldst_template.h"
#undef CPU_MMU_INDEX
#undef MEMSUFFIX
#endif
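
/*
 * In softmmu builds the _kernel accessors above use the kernel MMU index,
 * so the descriptor table and TSS reads below are performed with supervisor
 * rights regardless of the current CPL.  A legacy descriptor is two 32-bit
 * words: e1 carries the low base/limit bits, e2 carries the flags, type,
 * DPL and the high base/limit bits.
 */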
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}
static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
static void tss_load_seg(CPUX86State *env, int seg_reg, int selector, int cpl,
                         uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
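
/*
 * Hardware task switch, triggered by a jump, call or IRET through a TSS
 * descriptor or task gate: the outgoing register state is written back to
 * the current TSS, the new TSS is loaded, and the incoming state
 * (registers, segments, LDT, and CR3 for a 32-bit TSS) is installed with
 * the usual protection checks.
 */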
/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss_ra(CPUX86State *env, int tss_selector,
                          uint32_t e1, uint32_t e2, int source,
                          uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = cpu_ldl_kernel_ra(env, tss_base + 0x1c, retaddr);
        new_eip = cpu_ldl_kernel_ra(env, tss_base + 0x20, retaddr);
        new_eflags = cpu_ldl_kernel_ra(env, tss_base + 0x24, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_ldl_kernel_ra(env, tss_base + (0x28 + i * 4),
                                            retaddr);
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x48 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x60, retaddr);
        new_trap = cpu_ldl_kernel_ra(env, tss_base + 0x64, retaddr);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = cpu_lduw_kernel_ra(env, tss_base + 0x0e, retaddr);
        new_eflags = cpu_lduw_kernel_ra(env, tss_base + 0x10, retaddr);
        for (i = 0; i < 8; i++) {
            new_regs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x12 + i * 2),
                                             retaddr) | 0xffff0000;
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = cpu_lduw_kernel_ra(env, tss_base + (0x22 + i * 4),
                                             retaddr);
        }
        new_ldt = cpu_lduw_kernel_ra(env, tss_base + 0x2a, retaddr);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */

    v1 = cpu_ldub_kernel_ra(env, env->tr.base, retaddr);
    v2 = cpu_ldub_kernel_ra(env, env->tr.base + old_tss_limit_max, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base, v1, retaddr);
    cpu_stb_kernel_ra(env, env->tr.base + old_tss_limit_max, v2, retaddr);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 &= ~DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
    }

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        cpu_stl_kernel_ra(env, env->tr.base + 0x20, next_eip, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + 0x24, old_eflags, retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI], retaddr);
        cpu_stl_kernel_ra(env, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI], retaddr);
        for (i = 0; i < 6; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x48 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    } else {
        /* 16 bit */
        cpu_stw_kernel_ra(env, env->tr.base + 0x0e, next_eip, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + 0x10, old_eflags, retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI], retaddr);
        cpu_stw_kernel_ra(env, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI], retaddr);
        for (i = 0; i < 4; i++) {
            cpu_stw_kernel_ra(env, env->tr.base + (0x22 + i * 4),
                              env->segs[i].selector, retaddr);
        }
    }

    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        cpu_stw_kernel_ra(env, tss_base, env->tr.selector, retaddr);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;

        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8)) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */
    env->regs[R_EAX] = new_regs[0];
    env->regs[R_ECX] = new_regs[1];
    env->regs[R_EDX] = new_regs[2];
    env->regs[R_EBX] = new_regs[3];
    env->regs[R_ESP] = new_regs[4];
    env->regs[R_EBP] = new_regs[5];
    env->regs[R_ESI] = new_regs[6];
    env->regs[R_EDI] = new_regs[7];
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
}
static void switch_tss(CPUX86State *env, int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
static int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
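
/*
 * SET_ESP only updates the bits of ESP/RSP selected by sp_mask, so a
 * 16-bit stack segment modifies only SP while the upper bits of ESP are
 * preserved, as the architecture requires.
 */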
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))

/* XXX: add a is_user flag to have proper security support */
#define PUSHW_RA(ssp, sp, sp_mask, val, ra)                         \
    {                                                               \
        sp -= 2;                                                    \
        cpu_stw_kernel_ra(env, (ssp) + (sp & (sp_mask)), (val), ra); \
    }

#define PUSHL_RA(ssp, sp, sp_mask, val, ra)                             \
    {                                                                   \
        sp -= 4;                                                        \
        cpu_stl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val), ra); \
    }

#define POPW_RA(ssp, sp, sp_mask, val, ra)                          \
    {                                                               \
        val = cpu_lduw_kernel_ra(env, (ssp) + (sp & (sp_mask)), ra); \
        sp += 2;                                                    \
    }

#define POPL_RA(ssp, sp, sp_mask, val, ra)                              \
    {                                                                   \
        val = (uint32_t)cpu_ldl_kernel_ra(env, SEG_ADDL(ssp, sp, sp_mask), ra); \
        sp += 4;                                                        \
    }

#define PUSHW(ssp, sp, sp_mask, val) PUSHW_RA(ssp, sp, sp_mask, val, 0)
#define PUSHL(ssp, sp, sp_mask, val) PUSHL_RA(ssp, sp, sp_mask, val, 0)
#define POPW(ssp, sp, sp_mask, val) POPW_RA(ssp, sp, sp_mask, val, 0)
#define POPL(ssp, sp, sp_mask, val) POPL_RA(ssp, sp, sp_mask, val, 0)
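
/*
 * Protected-mode interrupt/exception delivery through the IDT: depending
 * on the gate type, DPL and the target code segment, the handler runs on
 * the current stack or on a new stack taken from the TSS; when entering
 * from vm86 mode the data segment selectors are pushed and then cleared.
 */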
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;
    int vm86 = env->eflags & VM_MASK;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;

            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                mask = 0xffffffff;
            } else {
                mask = 0xffff;
            }
            esp = (env->regs[R_ESP] - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift) {
                cpu_stl_kernel(env, ssp, error_code);
            } else {
                cpu_stw_kernel(env, ssp, error_code);
            }
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = env->regs[R_ESP];
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, env->regs[R_ESP]);
        }
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#ifdef TARGET_X86_64

#define PUSHQ_RA(sp, val, ra)                   \
    {                                           \
        sp -= 8;                                \
        cpu_stq_kernel_ra(env, sp, (val), ra);  \
    }

#define POPQ_RA(sp, val, ra)                    \
    {                                           \
        val = cpu_ldq_kernel_ra(env, sp, ra);   \
        sp += 8;                                \
    }

#define PUSHQ(sp, val) PUSHQ_RA(sp, val, 0)
#define POPQ(sp, val) POPQ_RA(sp, val, 0)
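
/*
 * In long mode the interrupt stack comes either from the RSPn fields of
 * the 64-bit TSS (on a privilege change) or from one of the seven IST
 * slots selected by the gate's IST field; both live in the TSS, which is
 * why the lookup below indexes env->tr.
 */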
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = x86_env_get_cpu(env);
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }
    return cpu_ldq_kernel(env, env->tr.base + index);
}
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        esp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
        ss = 0;
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        esp = env->regs[R_ESP];
    }
    esp &= ~0xfLL; /* align stack */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, env->regs[R_ESP]);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif
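
/*
 * SYSCALL/SYSRET fast system calls: the new CS and SS are derived from
 * MSR_STAR; in long mode the return RIP goes to RCX, the saved RFLAGS to
 * R11, and the flags to clear come from env->fmask (MSR_FMASK).
 */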
#if defined(CONFIG_USER_ONLY)
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));

    cs->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(cs);
}
#else
void helper_syscall(CPUX86State *env, int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        env->regs[R_ECX] = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        if (code64) {
            env->eip = env->lstar;
        } else {
            env->eip = env->cstar;
        }
    } else {
        env->regs[R_ECX] = (uint32_t)(env->eip + next_eip_addend);

        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#ifdef TARGET_X86_64
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
#endif
/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);
    esp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt. is_int is TRUE if coming from the int
 * instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE or if intno
 * is EXCP_SYSCALL.
 */
static void do_interrupt_user(CPUX86State *env, int intno, int is_int,
                              int error_code, target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = cpu_ldl_kernel(env, ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);
    }

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code. So update EIP for INT 0x80 and EXCP_SYSCALL. */
    if (is_int || intno == EXCP_SYSCALL) {
        env->eip = next_eip;
    }
}
#else

static void handle_even_inj(CPUX86State *env, int intno, int is_int,
                            int error_code, int is_hw, int rm)
{
    CPUState *cs = CPU(x86_env_get_cpu(env));
    uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;

        if (is_int) {
            type = SVM_EVTINJ_TYPE_SOFT;
        } else {
            type = SVM_EVTINJ_TYPE_EXEPT;
        }
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            x86_stl_phys(cs, env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
                         error_code);
        }
        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj);
    }
}
#endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                             int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
void x86_cpu_do_interrupt(CPUState *cs)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env, cs->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    if (cs->exception_index >= EXCP_VMEXIT) {
        assert(env->old_exception == -1);
        do_vmexit(env, cs->exception_index - EXCP_VMEXIT, env->error_code);
    } else {
        do_interrupt_all(cpu, cs->exception_index,
                         env->exception_is_int,
                         env->error_code,
                         env->exception_next_eip, 0);
        /* successfully delivered */
        env->old_exception = -1;
    }
#endif
}
void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(x86_env_get_cpu(env), intno, 0, 0, 0, is_hw);
}
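
/*
 * Pending interrupt requests are checked in priority order: APIC poll,
 * SIPI, SMI, NMI, machine check, external interrupt, then virtual
 * interrupt.  Only one request is serviced per call so that icount-driven
 * execution stays deterministic.
 */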
bool x86_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    bool ret = false;

#if !defined(CONFIG_USER_ONLY)
    if (interrupt_request & CPU_INTERRUPT_POLL) {
        cs->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(cpu->apic_state);
        /* Don't process multiple interrupt requests in a single call.
           This is required to make icount-driven execution deterministic. */
        return true;
    }
#endif
    if (interrupt_request & CPU_INTERRUPT_SIPI) {
        do_cpu_sipi(cpu);
        ret = true;
    } else if (env->hflags2 & HF2_GIF_MASK) {
        if ((interrupt_request & CPU_INTERRUPT_SMI) &&
            !(env->hflags & HF_SMM_MASK)) {
            cpu_svm_check_intercept_param(env, SVM_EXIT_SMI, 0, 0);
            cs->interrupt_request &= ~CPU_INTERRUPT_SMI;
            do_smm_enter(cpu);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_NMI) &&
                   !(env->hflags2 & HF2_NMI_MASK)) {
            cs->interrupt_request &= ~CPU_INTERRUPT_NMI;
            env->hflags2 |= HF2_NMI_MASK;
            do_interrupt_x86_hardirq(env, EXCP02_NMI, 1);
            ret = true;
        } else if (interrupt_request & CPU_INTERRUPT_MCE) {
            cs->interrupt_request &= ~CPU_INTERRUPT_MCE;
            do_interrupt_x86_hardirq(env, EXCP12_MCHK, 0);
            ret = true;
        } else if ((interrupt_request & CPU_INTERRUPT_HARD) &&
                   (((env->hflags2 & HF2_VINTR_MASK) &&
                     (env->hflags2 & HF2_HIF_MASK)) ||
                    (!(env->hflags2 & HF2_VINTR_MASK) &&
                     (env->eflags & IF_MASK &&
                      !(env->hflags & HF_INHIBIT_IRQ_MASK))))) {
            int intno;
            cpu_svm_check_intercept_param(env, SVM_EXIT_INTR, 0, 0);
            cs->interrupt_request &= ~(CPU_INTERRUPT_HARD |
                                       CPU_INTERRUPT_VIRQ);
            intno = cpu_get_pic_interrupt(env);
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            ret = true;
#if !defined(CONFIG_USER_ONLY)
        } else if ((interrupt_request & CPU_INTERRUPT_VIRQ) &&
                   (env->eflags & IF_MASK) &&
                   !(env->hflags & HF_INHIBIT_IRQ_MASK)) {
            int intno;
            /* FIXME: this should respect TPR */
            cpu_svm_check_intercept_param(env, SVM_EXIT_VINTR, 0, 0);
            intno = x86_ldl_phys(cs, env->vm_vmcb
                                 + offsetof(struct vmcb, control.int_vector));
            qemu_log_mask(CPU_LOG_TB_IN_ASM,
                          "Servicing virtual hardware INT=0x%02x\n", intno);
            do_interrupt_x86_hardirq(env, intno, 1);
            cs->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
            ret = true;
#endif
        }
    }

    return ret;
}
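
/*
 * LLDT and LTR load a system-segment descriptor from the GDT; in long
 * mode these descriptors are 16 bytes, so the upper half supplies bits
 * 63:32 of the base address.
 */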
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }
            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}
/* real mode call */
void helper_lcall_real(CPUX86State *env, int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = env->regs[R_ESP];
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        PUSHL_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHL_RA(ssp, esp, esp_mask, next_eip, GETPC());
    } else {
        PUSHW_RA(ssp, esp, esp_mask, env->segs[R_CS].selector, GETPC());
        PUSHW_RA(ssp, esp, esp_mask, next_eip, GETPC());
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp;

    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;

            /* 64 bit case */
            rsp = env->regs[R_ESP];
            PUSHQ_RA(rsp, env->segs[R_CS].selector, GETPC());
            PUSHQ_RA(rsp, next_eip, GETPC());
            /* from this point, not restartable */
            env->regs[R_ESP] = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(env, &ss, &sp, dpl, GETPC());
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                      TARGET_FMT_lx "\n", ss, sp, param_count,
                      env->regs[R_ESP]);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if ((ss & 3) != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
            }

            /* push_size = ((param_count * 2) + 8) << shift; */

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                PUSHL_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHL_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_kernel_ra(env, old_ssp +
                                            ((env->regs[R_ESP] + i * 4) &
                                             old_sp_mask), GETPC());
                    PUSHL_RA(ssp, sp, sp_mask, val, GETPC());
                }
            } else {
                PUSHW_RA(ssp, sp, sp_mask, env->segs[R_SS].selector, GETPC());
                PUSHW_RA(ssp, sp, sp_mask, env->regs[R_ESP], GETPC());
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_kernel_ra(env, old_ssp +
                                             ((env->regs[R_ESP] + i * 2) &
                                              old_sp_mask), GETPC());
                    PUSHW_RA(ssp, sp, sp_mask, val, GETPC());
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = env->regs[R_ESP];
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

        if (shift) {
            PUSHL_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHL_RA(ssp, sp, sp_mask, next_eip, GETPC());
        } else {
            PUSHW_RA(ssp, sp, sp_mask, env->segs[R_CS].selector, GETPC());
            PUSHW_RA(ssp, sp, sp_mask, next_eip, GETPC());
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sp, sp_mask);
        env->eip = offset;
    }
}
/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPL_RA(ssp, sp, sp_mask, new_cs, GETPC());
        new_cs &= 0xffff;
        POPL_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    } else {
        /* 16 bits */
        POPW_RA(ssp, sp, sp_mask, new_eip, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_cs, GETPC());
        POPW_RA(ssp, sp, sp_mask, new_eflags, GETPC());
    }
    env->regs[R_ESP] = (env->regs[R_ESP] & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
static inline void validate_seg(CPUX86State *env, int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
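
/*
 * Common tail for RETF and IRET in protected mode: pop CS:EIP (and EFLAGS
 * for IRET), and when returning to an outer privilege level also pop
 * SS:ESP and invalidate data segment registers that the outer level is
 * not allowed to keep.
 */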
/* protected mode iret */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2) {
        sp_mask = -1;
    } else
#endif
    {
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sp = env->regs[R_ESP];
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ_RA(sp, new_eip, retaddr);
        POPQ_RA(sp, new_cs, retaddr);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ_RA(sp, new_eflags, retaddr);
        }
    } else
#endif
    {
        if (shift == 1) {
            /* 32 bits */
            POPL_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPL_RA(ssp, sp, sp_mask, new_cs, retaddr);
            new_cs &= 0xffff;
            if (is_iret) {
                POPL_RA(ssp, sp, sp_mask, new_eflags, retaddr);
                if (new_eflags & VM_MASK) {
                    goto return_to_vm86;
                }
            }
        } else {
            /* 16 bits */
            POPW_RA(ssp, sp, sp_mask, new_eip, retaddr);
            POPW_RA(ssp, sp, sp_mask, new_cs, retaddr);
            if (is_iret) {
                POPW_RA(ssp, sp, sp_mask, new_eflags, retaddr);
            }
        }
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(CPU(x86_env_get_cpu(env)));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ_RA(sp, new_esp, retaddr);
            POPQ_RA(sp, new_ss, retaddr);
            new_ss &= 0xffff;
        } else
#endif
        {
            if (shift == 1) {
                /* 32 bits */
                POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
                new_ss &= 0xffff;
            } else {
                /* 16 bits */
                POPW_RA(ssp, sp, sp_mask, new_esp, retaddr);
                POPW_RA(ssp, sp, sp_mask, new_ss, retaddr);
            }
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = -1;
        } else
#endif
        {
            sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    POPL_RA(ssp, sp, sp_mask, new_esp, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ss, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_es, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_ds, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_fs, retaddr);
    POPL_RA(ssp, sp, sp_mask, new_gs, retaddr);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}
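
/*
 * Protected-mode IRET: with NT set this is a task return through the back
 * link of the current TSS (rejected in long mode); otherwise it is handled
 * as a far return that also restores EFLAGS.  Any NMI-blocked state is
 * cleared on the way out.
 */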
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
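
/* lret: the same protected-mode return path as iret, minus the EFLAGS pop. */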
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}
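
/*
 * SYSENTER: load flat ring-0 code and stack segments derived from
 * SYSENTER_CS (CS = cs, SS = cs + 8), clear VM/IF/RF and continue at
 * SYSENTER_EIP with the SYSENTER_ESP stack; no return address is saved.
 */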
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}
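
/*
 * SYSEXIT: only legal at CPL 0 with a non-null SYSENTER_CS.  Flat ring-3
 * code and stack segments are loaded at fixed offsets from SYSENTER_CS
 * (+16/+24, or +32/+40 for a 64-bit exit) and execution resumes with
 * ECX as the new stack pointer and EDX as the new instruction pointer.
 */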
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}
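
/*
 * LSL: compute the expanded limit of the selected descriptor and report
 * success in ZF; a null selector, a descriptor not accessible at the
 * current CPL/RPL, or a type without a limit clears ZF and returns 0.
 */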
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;
    return limit;
}
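
/*
 * LAR: like LSL, but returns the access-rights bytes of the descriptor
 * (masked with 0x00f0ff00); gate descriptor types are also accepted.
 */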
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return 0;
        }
    }
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;
}
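
/* VERR: set ZF if the selected segment is readable at the current CPL/RPL. */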
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
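
/* VERW: set ZF if the selected segment is a data segment writable at the
   current CPL/RPL. */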
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env, CC_OP);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            CC_SRC = eflags & ~CC_Z;
            return;
        }
    }
    CC_SRC = eflags | CC_Z;
}
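
/*
 * User-mode-only segment register load: in real or vm86 mode the selector
 * simply provides a base of selector << 4, otherwise the full
 * protected-mode checks of helper_load_seg() apply.
 */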
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        int dpl = (env->eflags & VM_MASK) ? 3 : 0;
        selector &= 0xffff;
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff,
                               DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                               DESC_A_MASK | (dpl << DESC_DPL_SHIFT));
    } else {
        helper_load_seg(env, seg_reg, selector);
    }
}
#endif
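
/*
 * The I/O permission bitmap lives in the 32-bit TSS at the 16-bit offset
 * stored at TSS+0x66; each I/O port maps to one bit.  An access of 'size'
 * bytes is allowed only if every covered bit is clear, and two bytes are
 * read so that checks crossing a byte boundary work.
 */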
/* check if Port I/O is allowed in TSS */
static inline void check_io(CPUX86State *env, int addr, int size,
                            uintptr_t retaddr)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103) {
        goto fail;
    }
    io_offset = cpu_lduw_kernel_ra(env, env->tr.base + 0x66, retaddr);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit) {
        goto fail;
    }
    val = cpu_lduw_kernel_ra(env, env->tr.base + io_offset, retaddr);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }
}
void helper_check_iob(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 1, GETPC());
}

void helper_check_iow(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 2, GETPC());
}

void helper_check_iol(CPUX86State *env, uint32_t t0)
{
    check_io(env, t0, 4, GETPC());