/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif
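/*
 * Note: the 64-bit variant of SET_ESP special-cases the 16-bit and 32-bit
 * stack masks so that a 16-bit write only touches SP and a 32-bit write
 * zero-extends into RSP, while any other mask stores the value unmasked.
 */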
/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}
static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}
static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}
static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}
int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;

    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}
/* return non zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}
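/*
 * In a legacy descriptor the base is scattered over bytes 2-4 and 7 and the
 * 20-bit limit over bytes 0-1 plus the low nibble of byte 6; when the G bit
 * is set the limit is in 4 KiB units, hence the (limit << 12) | 0xfff above.
 */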
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}
static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}
static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
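/*
 * The SWITCH_TSS_* source tells the task-switch code how to handle the busy
 * bit and the NT flag: JMP and IRET clear the old TSS busy bit, while CALL
 * leaves it set, links the new task back to the old one and sets NT.
 */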
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    /* While true tss_limit may be larger, we don't access the iopb here. */
    access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
                       MMU_DATA_LOAD, mmu_index, retaddr);

    /* save the current state in the old TSS */
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }

    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}
static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}
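/*
 * A zero mask means a 64-bit stack (RSP is used unmasked); otherwise the B
 * bit of the SS descriptor selects a 32-bit ESP or a 16-bit SP stack pointer.
 */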
static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
        return 0;
    }
    /* Everything else including reserved exception is a fault.  */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
642 /* protected mode interrupt */
643 static void do_interrupt_protected(CPUX86State
*env
, int intno
, int is_int
,
644 int error_code
, unsigned int next_eip
,
649 int type
, dpl
, selector
, ss_dpl
, cpl
;
650 int has_error_code
, new_stack
, shift
;
651 uint32_t e1
, e2
, offset
, ss
= 0, ss_e1
= 0, ss_e2
= 0;
652 uint32_t old_eip
, eflags
;
653 int vm86
= env
->eflags
& VM_MASK
;
658 if (!is_int
&& !is_hw
) {
659 has_error_code
= exception_has_error_code(intno
);
666 set_rf
= exception_is_fault(intno
);
670 if (intno
* 8 + 7 > dt
->limit
) {
671 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
673 ptr
= dt
->base
+ intno
* 8;
674 e1
= cpu_ldl_kernel(env
, ptr
);
675 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
676 /* check gate type */
677 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
679 case 5: /* task gate */
680 case 6: /* 286 interrupt gate */
681 case 7: /* 286 trap gate */
682 case 14: /* 386 interrupt gate */
683 case 15: /* 386 trap gate */
686 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
689 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
690 cpl
= env
->hflags
& HF_CPL_MASK
;
691 /* check privilege if software int */
692 if (is_int
&& dpl
< cpl
) {
693 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
698 sa
.mmu_index
= cpu_mmu_index_kernel(env
);
702 /* must do that check here to return the correct error code */
703 if (!(e2
& DESC_P_MASK
)) {
704 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
706 shift
= switch_tss(env
, intno
* 8, e1
, e2
, SWITCH_TSS_CALL
, old_eip
);
707 if (has_error_code
) {
708 /* push the error code */
709 if (env
->segs
[R_SS
].flags
& DESC_B_MASK
) {
710 sa
.sp_mask
= 0xffffffff;
714 sa
.sp
= env
->regs
[R_ESP
];
715 sa
.ss_base
= env
->segs
[R_SS
].base
;
717 pushl(&sa
, error_code
);
719 pushw(&sa
, error_code
);
721 SET_ESP(sa
.sp
, sa
.sp_mask
);
726 /* Otherwise, trap or interrupt gate */
728 /* check valid bit */
729 if (!(e2
& DESC_P_MASK
)) {
730 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
733 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
734 if ((selector
& 0xfffc) == 0) {
735 raise_exception_err(env
, EXCP0D_GPF
, 0);
737 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
738 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
740 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
741 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
743 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
745 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
747 if (!(e2
& DESC_P_MASK
)) {
748 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
750 if (e2
& DESC_C_MASK
) {
754 /* to inner privilege */
756 get_ss_esp_from_tss(env
, &ss
, &esp
, dpl
, 0);
757 if ((ss
& 0xfffc) == 0) {
758 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
760 if ((ss
& 3) != dpl
) {
761 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
763 if (load_segment(env
, &ss_e1
, &ss_e2
, ss
) != 0) {
764 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
766 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
768 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
770 if (!(ss_e2
& DESC_S_MASK
) ||
771 (ss_e2
& DESC_CS_MASK
) ||
772 !(ss_e2
& DESC_W_MASK
)) {
773 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
775 if (!(ss_e2
& DESC_P_MASK
)) {
776 raise_exception_err(env
, EXCP0A_TSS
, ss
& 0xfffc);
780 sa
.sp_mask
= get_sp_mask(ss_e2
);
781 sa
.ss_base
= get_seg_base(ss_e1
, ss_e2
);
783 /* to same privilege */
785 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
788 sa
.sp
= env
->regs
[R_ESP
];
789 sa
.sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
790 sa
.ss_base
= env
->segs
[R_SS
].base
;
796 /* XXX: check that enough room is available */
797 push_size
= 6 + (new_stack
<< 2) + (has_error_code
<< 1);
803 eflags
= cpu_compute_eflags(env
);
805 * AMD states that code breakpoint #DBs clear RF=0, Intel leaves it
806 * as is. AMD behavior could be implemented in check_hw_breakpoints().
815 pushl(&sa
, env
->segs
[R_GS
].selector
);
816 pushl(&sa
, env
->segs
[R_FS
].selector
);
817 pushl(&sa
, env
->segs
[R_DS
].selector
);
818 pushl(&sa
, env
->segs
[R_ES
].selector
);
820 pushl(&sa
, env
->segs
[R_SS
].selector
);
821 pushl(&sa
, env
->regs
[R_ESP
]);
824 pushl(&sa
, env
->segs
[R_CS
].selector
);
826 if (has_error_code
) {
827 pushl(&sa
, error_code
);
832 pushw(&sa
, env
->segs
[R_GS
].selector
);
833 pushw(&sa
, env
->segs
[R_FS
].selector
);
834 pushw(&sa
, env
->segs
[R_DS
].selector
);
835 pushw(&sa
, env
->segs
[R_ES
].selector
);
837 pushw(&sa
, env
->segs
[R_SS
].selector
);
838 pushw(&sa
, env
->regs
[R_ESP
]);
841 pushw(&sa
, env
->segs
[R_CS
].selector
);
843 if (has_error_code
) {
844 pushw(&sa
, error_code
);
848 /* interrupt gate clear IF mask */
849 if ((type
& 1) == 0) {
850 env
->eflags
&= ~IF_MASK
;
852 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
856 cpu_x86_load_seg_cache(env
, R_ES
, 0, 0, 0, 0);
857 cpu_x86_load_seg_cache(env
, R_DS
, 0, 0, 0, 0);
858 cpu_x86_load_seg_cache(env
, R_FS
, 0, 0, 0, 0);
859 cpu_x86_load_seg_cache(env
, R_GS
, 0, 0, 0, 0);
861 ss
= (ss
& ~3) | dpl
;
862 cpu_x86_load_seg_cache(env
, R_SS
, ss
, sa
.ss_base
,
863 get_seg_limit(ss_e1
, ss_e2
), ss_e2
);
865 SET_ESP(sa
.sp
, sa
.sp_mask
);
867 selector
= (selector
& ~3) | dpl
;
868 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
869 get_seg_base(e1
, e2
),
870 get_seg_limit(e1
, e2
),
877 static void pushq(StackAccess
*sa
, uint64_t val
)
880 cpu_stq_mmuidx_ra(sa
->env
, sa
->sp
, val
, sa
->mmu_index
, sa
->ra
);
883 static uint64_t popq(StackAccess
*sa
)
885 uint64_t ret
= cpu_ldq_mmuidx_ra(sa
->env
, sa
->sp
, sa
->mmu_index
, sa
->ra
);
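/*
 * In long mode the SS base is zero and RSP is used unmasked, so the 64-bit
 * push/pop helpers deliberately ignore ss_base and sp_mask.
 */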
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    target_ulong rsp;
    int index, pg_mode;
    int64_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}
923 static void do_interrupt64(CPUX86State
*env
, int intno
, int is_int
,
924 int error_code
, target_ulong next_eip
, int is_hw
)
928 int type
, dpl
, selector
, cpl
, ist
;
929 int has_error_code
, new_stack
;
930 uint32_t e1
, e2
, e3
, eflags
;
931 target_ulong old_eip
, offset
;
936 if (!is_int
&& !is_hw
) {
937 has_error_code
= exception_has_error_code(intno
);
944 set_rf
= exception_is_fault(intno
);
948 if (intno
* 16 + 15 > dt
->limit
) {
949 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
951 ptr
= dt
->base
+ intno
* 16;
952 e1
= cpu_ldl_kernel(env
, ptr
);
953 e2
= cpu_ldl_kernel(env
, ptr
+ 4);
954 e3
= cpu_ldl_kernel(env
, ptr
+ 8);
955 /* check gate type */
956 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
958 case 14: /* 386 interrupt gate */
959 case 15: /* 386 trap gate */
962 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
965 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
966 cpl
= env
->hflags
& HF_CPL_MASK
;
967 /* check privilege if software int */
968 if (is_int
&& dpl
< cpl
) {
969 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
971 /* check valid bit */
972 if (!(e2
& DESC_P_MASK
)) {
973 raise_exception_err(env
, EXCP0B_NOSEG
, intno
* 8 + 2);
976 offset
= ((target_ulong
)e3
<< 32) | (e2
& 0xffff0000) | (e1
& 0x0000ffff);
978 if ((selector
& 0xfffc) == 0) {
979 raise_exception_err(env
, EXCP0D_GPF
, 0);
982 if (load_segment(env
, &e1
, &e2
, selector
) != 0) {
983 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
985 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
986 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
988 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
990 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
992 if (!(e2
& DESC_P_MASK
)) {
993 raise_exception_err(env
, EXCP0B_NOSEG
, selector
& 0xfffc);
995 if (!(e2
& DESC_L_MASK
) || (e2
& DESC_B_MASK
)) {
996 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
998 if (e2
& DESC_C_MASK
) {
1004 sa
.mmu_index
= cpu_mmu_index_kernel(env
);
1007 if (dpl
< cpl
|| ist
!= 0) {
1008 /* to inner privilege */
1010 sa
.sp
= get_rsp_from_tss(env
, ist
!= 0 ? ist
+ 3 : dpl
);
1012 /* to same privilege */
1013 if (env
->eflags
& VM_MASK
) {
1014 raise_exception_err(env
, EXCP0D_GPF
, selector
& 0xfffc);
1017 sa
.sp
= env
->regs
[R_ESP
];
1019 sa
.sp
&= ~0xfLL
; /* align stack */
1021 /* See do_interrupt_protected. */
1022 eflags
= cpu_compute_eflags(env
);
1027 pushq(&sa
, env
->segs
[R_SS
].selector
);
1028 pushq(&sa
, env
->regs
[R_ESP
]);
1030 pushq(&sa
, env
->segs
[R_CS
].selector
);
1031 pushq(&sa
, old_eip
);
1032 if (has_error_code
) {
1033 pushq(&sa
, error_code
);
1036 /* interrupt gate clear IF mask */
1037 if ((type
& 1) == 0) {
1038 env
->eflags
&= ~IF_MASK
;
1040 env
->eflags
&= ~(TF_MASK
| VM_MASK
| RF_MASK
| NT_MASK
);
1043 uint32_t ss
= 0 | dpl
; /* SS = NULL selector with RPL = new CPL */
1044 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, dpl
<< DESC_DPL_SHIFT
);
1046 env
->regs
[R_ESP
] = sa
.sp
;
1048 selector
= (selector
& ~3) | dpl
;
1049 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1050 get_seg_base(e1
, e2
),
1051 get_seg_limit(e1
, e2
),
1055 #endif /* TARGET_X86_64 */
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}
1114 static void do_interrupt_real(CPUX86State
*env
, int intno
, int is_int
,
1115 int error_code
, unsigned int next_eip
)
1121 uint32_t old_cs
, old_eip
;
1124 /* real mode (simpler!) */
1126 if (intno
* 4 + 3 > dt
->limit
) {
1127 raise_exception_err(env
, EXCP0D_GPF
, intno
* 8 + 2);
1129 ptr
= dt
->base
+ intno
* 4;
1130 offset
= cpu_lduw_kernel(env
, ptr
);
1131 selector
= cpu_lduw_kernel(env
, ptr
+ 2);
1135 sa
.sp
= env
->regs
[R_ESP
];
1136 sa
.sp_mask
= 0xffff;
1137 sa
.ss_base
= env
->segs
[R_SS
].base
;
1138 sa
.mmu_index
= cpu_mmu_index_kernel(env
);
1145 old_cs
= env
->segs
[R_CS
].selector
;
1146 /* XXX: use SS segment size? */
1147 pushw(&sa
, cpu_compute_eflags(env
));
1149 pushw(&sa
, old_eip
);
1151 /* update processor state */
1152 SET_ESP(sa
.sp
, sa
.sp_mask
);
1154 env
->segs
[R_CS
].selector
= selector
;
1155 env
->segs
[R_CS
].base
= (selector
<< 4);
1156 env
->eflags
&= ~(IF_MASK
| TF_MASK
| AC_MASK
| RF_MASK
);
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}
void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
1300 void helper_ltr(CPUX86State
*env
, int selector
)
1304 int index
, type
, entry_limit
;
1308 if ((selector
& 0xfffc) == 0) {
1309 /* NULL selector case: invalid TR */
1314 if (selector
& 0x4) {
1315 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1318 index
= selector
& ~7;
1319 #ifdef TARGET_X86_64
1320 if (env
->hflags
& HF_LMA_MASK
) {
1327 if ((index
+ entry_limit
) > dt
->limit
) {
1328 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1330 ptr
= dt
->base
+ index
;
1331 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1332 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1333 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1334 if ((e2
& DESC_S_MASK
) ||
1335 (type
!= 1 && type
!= 9)) {
1336 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1338 if (!(e2
& DESC_P_MASK
)) {
1339 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1341 #ifdef TARGET_X86_64
1342 if (env
->hflags
& HF_LMA_MASK
) {
1345 e3
= cpu_ldl_kernel_ra(env
, ptr
+ 8, GETPC());
1346 e4
= cpu_ldl_kernel_ra(env
, ptr
+ 12, GETPC());
1347 if ((e4
>> DESC_TYPE_SHIFT
) & 0xf) {
1348 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1350 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1351 env
->tr
.base
|= (target_ulong
)e3
<< 32;
1355 load_seg_cache_raw_dt(&env
->tr
, e1
, e2
);
1357 e2
|= DESC_TSS_BUSY_MASK
;
1358 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1360 env
->tr
.selector
= selector
;
1363 /* only works if protected mode and not VM86. seg_reg must be != R_CS */
1364 void helper_load_seg(CPUX86State
*env
, int seg_reg
, int selector
)
1373 cpl
= env
->hflags
& HF_CPL_MASK
;
1374 if ((selector
& 0xfffc) == 0) {
1375 /* null selector case */
1377 #ifdef TARGET_X86_64
1378 && (!(env
->hflags
& HF_CS64_MASK
) || cpl
== 3)
1381 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1383 cpu_x86_load_seg_cache(env
, seg_reg
, selector
, 0, 0, 0);
1386 if (selector
& 0x4) {
1391 index
= selector
& ~7;
1392 if ((index
+ 7) > dt
->limit
) {
1393 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1395 ptr
= dt
->base
+ index
;
1396 e1
= cpu_ldl_kernel_ra(env
, ptr
, GETPC());
1397 e2
= cpu_ldl_kernel_ra(env
, ptr
+ 4, GETPC());
1399 if (!(e2
& DESC_S_MASK
)) {
1400 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1403 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1404 if (seg_reg
== R_SS
) {
1405 /* must be writable segment */
1406 if ((e2
& DESC_CS_MASK
) || !(e2
& DESC_W_MASK
)) {
1407 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1409 if (rpl
!= cpl
|| dpl
!= cpl
) {
1410 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1413 /* must be readable segment */
1414 if ((e2
& (DESC_CS_MASK
| DESC_R_MASK
)) == DESC_CS_MASK
) {
1415 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1418 if (!(e2
& DESC_CS_MASK
) || !(e2
& DESC_C_MASK
)) {
1419 /* if not conforming code, test rights */
1420 if (dpl
< cpl
|| dpl
< rpl
) {
1421 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1426 if (!(e2
& DESC_P_MASK
)) {
1427 if (seg_reg
== R_SS
) {
1428 raise_exception_err_ra(env
, EXCP0C_STACK
, selector
& 0xfffc, GETPC());
1430 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1434 /* set the access bit if not already set */
1435 if (!(e2
& DESC_A_MASK
)) {
1437 cpu_stl_kernel_ra(env
, ptr
+ 4, e2
, GETPC());
1440 cpu_x86_load_seg_cache(env
, seg_reg
, selector
,
1441 get_seg_base(e1
, e2
),
1442 get_seg_limit(e1
, e2
),
1445 qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
1446 selector
, (unsigned long)sc
->base
, sc
->limit
, sc
->flags
);
1451 /* protected mode jump */
1452 void helper_ljmp_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1453 target_ulong next_eip
)
1456 uint32_t e1
, e2
, cpl
, dpl
, rpl
, limit
;
1458 if ((new_cs
& 0xfffc) == 0) {
1459 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1461 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1462 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1464 cpl
= env
->hflags
& HF_CPL_MASK
;
1465 if (e2
& DESC_S_MASK
) {
1466 if (!(e2
& DESC_CS_MASK
)) {
1467 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1469 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1470 if (e2
& DESC_C_MASK
) {
1471 /* conforming code segment */
1473 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1476 /* non conforming code segment */
1479 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1482 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1485 if (!(e2
& DESC_P_MASK
)) {
1486 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1488 limit
= get_seg_limit(e1
, e2
);
1489 if (new_eip
> limit
&&
1490 (!(env
->hflags
& HF_LMA_MASK
) || !(e2
& DESC_L_MASK
))) {
1491 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1493 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1494 get_seg_base(e1
, e2
), limit
, e2
);
1497 /* jump to call or task gate */
1498 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1500 cpl
= env
->hflags
& HF_CPL_MASK
;
1501 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
1503 #ifdef TARGET_X86_64
1504 if (env
->efer
& MSR_EFER_LMA
) {
1506 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1511 case 1: /* 286 TSS */
1512 case 9: /* 386 TSS */
1513 case 5: /* task gate */
1514 if (dpl
< cpl
|| dpl
< rpl
) {
1515 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1517 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_JMP
, next_eip
, GETPC());
1519 case 4: /* 286 call gate */
1520 case 12: /* 386 call gate */
1521 if ((dpl
< cpl
) || (dpl
< rpl
)) {
1522 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1524 if (!(e2
& DESC_P_MASK
)) {
1525 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1528 new_eip
= (e1
& 0xffff);
1530 new_eip
|= (e2
& 0xffff0000);
1533 #ifdef TARGET_X86_64
1534 if (env
->efer
& MSR_EFER_LMA
) {
1535 /* load the upper 8 bytes of the 64-bit call gate */
1536 if (load_segment_ra(env
, &e1
, &e2
, new_cs
+ 8, GETPC())) {
1537 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc,
1540 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1542 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc,
1545 new_eip
|= ((target_ulong
)e1
) << 32;
1549 if (load_segment_ra(env
, &e1
, &e2
, gate_cs
, GETPC()) != 0) {
1550 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1552 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1553 /* must be code segment */
1554 if (((e2
& (DESC_S_MASK
| DESC_CS_MASK
)) !=
1555 (DESC_S_MASK
| DESC_CS_MASK
))) {
1556 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1558 if (((e2
& DESC_C_MASK
) && (dpl
> cpl
)) ||
1559 (!(e2
& DESC_C_MASK
) && (dpl
!= cpl
))) {
1560 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1562 #ifdef TARGET_X86_64
1563 if (env
->efer
& MSR_EFER_LMA
) {
1564 if (!(e2
& DESC_L_MASK
)) {
1565 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1567 if (e2
& DESC_B_MASK
) {
1568 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1572 if (!(e2
& DESC_P_MASK
)) {
1573 raise_exception_err_ra(env
, EXCP0D_GPF
, gate_cs
& 0xfffc, GETPC());
1575 limit
= get_seg_limit(e1
, e2
);
1576 if (new_eip
> limit
&&
1577 (!(env
->hflags
& HF_LMA_MASK
) || !(e2
& DESC_L_MASK
))) {
1578 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1580 cpu_x86_load_seg_cache(env
, R_CS
, (gate_cs
& 0xfffc) | cpl
,
1581 get_seg_base(e1
, e2
), limit
, e2
);
1585 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1591 /* real mode call */
1592 void helper_lcall_real(CPUX86State
*env
, uint32_t new_cs
, uint32_t new_eip
,
1593 int shift
, uint32_t next_eip
)
1599 sa
.sp
= env
->regs
[R_ESP
];
1600 sa
.sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1601 sa
.ss_base
= env
->segs
[R_SS
].base
;
1602 sa
.mmu_index
= cpu_mmu_index_kernel(env
);
1605 pushl(&sa
, env
->segs
[R_CS
].selector
);
1606 pushl(&sa
, next_eip
);
1608 pushw(&sa
, env
->segs
[R_CS
].selector
);
1609 pushw(&sa
, next_eip
);
1612 SET_ESP(sa
.sp
, sa
.sp_mask
);
1614 env
->segs
[R_CS
].selector
= new_cs
;
1615 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1618 /* protected mode call */
1619 void helper_lcall_protected(CPUX86State
*env
, int new_cs
, target_ulong new_eip
,
1620 int shift
, target_ulong next_eip
)
1623 uint32_t e1
, e2
, cpl
, dpl
, rpl
, selector
, param_count
;
1624 uint32_t ss
= 0, ss_e1
= 0, ss_e2
= 0, type
, ss_dpl
;
1625 uint32_t val
, limit
, old_sp_mask
;
1626 target_ulong old_ssp
, offset
;
1629 LOG_PCALL("lcall %04x:" TARGET_FMT_lx
" s=%d\n", new_cs
, new_eip
, shift
);
1630 LOG_PCALL_STATE(env_cpu(env
));
1631 if ((new_cs
& 0xfffc) == 0) {
1632 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1634 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, GETPC()) != 0) {
1635 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1637 cpl
= env
->hflags
& HF_CPL_MASK
;
1638 LOG_PCALL("desc=%08x:%08x\n", e1
, e2
);
1642 sa
.mmu_index
= cpu_mmu_index_kernel(env
);
1644 if (e2
& DESC_S_MASK
) {
1645 if (!(e2
& DESC_CS_MASK
)) {
1646 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1648 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1649 if (e2
& DESC_C_MASK
) {
1650 /* conforming code segment */
1652 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1655 /* non conforming code segment */
1658 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1661 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1664 if (!(e2
& DESC_P_MASK
)) {
1665 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1668 #ifdef TARGET_X86_64
1669 /* XXX: check 16/32 bit cases in long mode */
1672 sa
.sp
= env
->regs
[R_ESP
];
1675 pushq(&sa
, env
->segs
[R_CS
].selector
);
1676 pushq(&sa
, next_eip
);
1677 /* from this point, not restartable */
1678 env
->regs
[R_ESP
] = sa
.sp
;
1679 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1680 get_seg_base(e1
, e2
),
1681 get_seg_limit(e1
, e2
), e2
);
1686 sa
.sp
= env
->regs
[R_ESP
];
1687 sa
.sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1688 sa
.ss_base
= env
->segs
[R_SS
].base
;
1690 pushl(&sa
, env
->segs
[R_CS
].selector
);
1691 pushl(&sa
, next_eip
);
1693 pushw(&sa
, env
->segs
[R_CS
].selector
);
1694 pushw(&sa
, next_eip
);
1697 limit
= get_seg_limit(e1
, e2
);
1698 if (new_eip
> limit
) {
1699 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1701 /* from this point, not restartable */
1702 SET_ESP(sa
.sp
, sa
.sp_mask
);
1703 cpu_x86_load_seg_cache(env
, R_CS
, (new_cs
& 0xfffc) | cpl
,
1704 get_seg_base(e1
, e2
), limit
, e2
);
1708 /* check gate type */
1709 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1710 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1713 #ifdef TARGET_X86_64
1714 if (env
->efer
& MSR_EFER_LMA
) {
1716 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1722 case 1: /* available 286 TSS */
1723 case 9: /* available 386 TSS */
1724 case 5: /* task gate */
1725 if (dpl
< cpl
|| dpl
< rpl
) {
1726 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1728 switch_tss_ra(env
, new_cs
, e1
, e2
, SWITCH_TSS_CALL
, next_eip
, GETPC());
1730 case 4: /* 286 call gate */
1731 case 12: /* 386 call gate */
1734 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1739 if (dpl
< cpl
|| dpl
< rpl
) {
1740 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, GETPC());
1742 /* check valid bit */
1743 if (!(e2
& DESC_P_MASK
)) {
1744 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, GETPC());
1746 selector
= e1
>> 16;
1747 param_count
= e2
& 0x1f;
1748 offset
= (e2
& 0xffff0000) | (e1
& 0x0000ffff);
1749 #ifdef TARGET_X86_64
1750 if (env
->efer
& MSR_EFER_LMA
) {
1751 /* load the upper 8 bytes of the 64-bit call gate */
1752 if (load_segment_ra(env
, &e1
, &e2
, new_cs
+ 8, GETPC())) {
1753 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc,
1756 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x1f;
1758 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc,
1761 offset
|= ((target_ulong
)e1
) << 32;
1764 if ((selector
& 0xfffc) == 0) {
1765 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
1768 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
1769 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1771 if (!(e2
& DESC_S_MASK
) || !(e2
& (DESC_CS_MASK
))) {
1772 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1774 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
1776 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1778 #ifdef TARGET_X86_64
1779 if (env
->efer
& MSR_EFER_LMA
) {
1780 if (!(e2
& DESC_L_MASK
)) {
1781 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1783 if (e2
& DESC_B_MASK
) {
1784 raise_exception_err_ra(env
, EXCP0D_GPF
, selector
& 0xfffc, GETPC());
1789 if (!(e2
& DESC_P_MASK
)) {
1790 raise_exception_err_ra(env
, EXCP0B_NOSEG
, selector
& 0xfffc, GETPC());
1793 if (!(e2
& DESC_C_MASK
) && dpl
< cpl
) {
1794 /* to inner privilege */
1795 #ifdef TARGET_X86_64
1797 ss
= dpl
; /* SS = NULL selector with RPL = new CPL */
1799 sa
.sp
= get_rsp_from_tss(env
, dpl
);
1801 sa
.ss_base
= 0; /* SS base is always zero in IA-32e mode */
1802 LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
1803 TARGET_FMT_lx
"\n", ss
, sa
.sp
, env
->regs
[R_ESP
]);
1808 get_ss_esp_from_tss(env
, &ss
, &sp32
, dpl
, GETPC());
1809 LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
1810 TARGET_FMT_lx
"\n", ss
, sp32
, param_count
,
1812 if ((ss
& 0xfffc) == 0) {
1813 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1815 if ((ss
& 3) != dpl
) {
1816 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1818 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, ss
, GETPC()) != 0) {
1819 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1821 ss_dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
1822 if (ss_dpl
!= dpl
) {
1823 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1825 if (!(ss_e2
& DESC_S_MASK
) ||
1826 (ss_e2
& DESC_CS_MASK
) ||
1827 !(ss_e2
& DESC_W_MASK
)) {
1828 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1830 if (!(ss_e2
& DESC_P_MASK
)) {
1831 raise_exception_err_ra(env
, EXCP0A_TSS
, ss
& 0xfffc, GETPC());
1835 sa
.sp_mask
= get_sp_mask(ss_e2
);
1836 sa
.ss_base
= get_seg_base(ss_e1
, ss_e2
);
1839 /* push_size = ((param_count * 2) + 8) << shift; */
1840 old_sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1841 old_ssp
= env
->segs
[R_SS
].base
;
1843 #ifdef TARGET_X86_64
1845 /* XXX: verify if new stack address is canonical */
1846 pushq(&sa
, env
->segs
[R_SS
].selector
);
1847 pushq(&sa
, env
->regs
[R_ESP
]);
1848 /* parameters aren't supported for 64-bit call gates */
1852 pushl(&sa
, env
->segs
[R_SS
].selector
);
1853 pushl(&sa
, env
->regs
[R_ESP
]);
1854 for (i
= param_count
- 1; i
>= 0; i
--) {
1855 val
= cpu_ldl_data_ra(env
,
1856 old_ssp
+ ((env
->regs
[R_ESP
] + i
* 4) & old_sp_mask
),
1861 pushw(&sa
, env
->segs
[R_SS
].selector
);
1862 pushw(&sa
, env
->regs
[R_ESP
]);
1863 for (i
= param_count
- 1; i
>= 0; i
--) {
1864 val
= cpu_lduw_data_ra(env
,
1865 old_ssp
+ ((env
->regs
[R_ESP
] + i
* 2) & old_sp_mask
),
1872 /* to same privilege */
1873 sa
.sp
= env
->regs
[R_ESP
];
1874 sa
.sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
1875 sa
.ss_base
= env
->segs
[R_SS
].base
;
1876 /* push_size = (4 << shift); */
1880 #ifdef TARGET_X86_64
1882 pushq(&sa
, env
->segs
[R_CS
].selector
);
1883 pushq(&sa
, next_eip
);
1887 pushl(&sa
, env
->segs
[R_CS
].selector
);
1888 pushl(&sa
, next_eip
);
1890 pushw(&sa
, env
->segs
[R_CS
].selector
);
1891 pushw(&sa
, next_eip
);
1894 /* from this point, not restartable */
1897 #ifdef TARGET_X86_64
1899 cpu_x86_load_seg_cache(env
, R_SS
, ss
, 0, 0, 0);
1903 ss
= (ss
& ~3) | dpl
;
1904 cpu_x86_load_seg_cache(env
, R_SS
, ss
,
1906 get_seg_limit(ss_e1
, ss_e2
),
1911 selector
= (selector
& ~3) | dpl
;
1912 cpu_x86_load_seg_cache(env
, R_CS
, selector
,
1913 get_seg_base(e1
, e2
),
1914 get_seg_limit(e1
, e2
),
1916 SET_ESP(sa
.sp
, sa
.sp_mask
);
1921 /* real and vm86 mode iret */
1922 void helper_iret_real(CPUX86State
*env
, int shift
)
1924 uint32_t new_cs
, new_eip
, new_eflags
;
1930 sa
.mmu_index
= x86_mmu_index_pl(env
, 0);
1931 sa
.sp_mask
= 0xffff; /* XXXX: use SS segment size? */
1932 sa
.sp
= env
->regs
[R_ESP
];
1933 sa
.ss_base
= env
->segs
[R_SS
].base
;
1937 new_eip
= popl(&sa
);
1938 new_cs
= popl(&sa
) & 0xffff;
1939 new_eflags
= popl(&sa
);
1942 new_eip
= popw(&sa
);
1944 new_eflags
= popw(&sa
);
1946 SET_ESP(sa
.sp
, sa
.sp_mask
);
1947 env
->segs
[R_CS
].selector
= new_cs
;
1948 env
->segs
[R_CS
].base
= (new_cs
<< 4);
1950 if (env
->eflags
& VM_MASK
) {
1951 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| RF_MASK
|
1954 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| IF_MASK
| IOPL_MASK
|
1958 eflags_mask
&= 0xffff;
1960 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
1961 env
->hflags2
&= ~HF2_NMI_MASK
;
static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
1990 /* protected mode iret */
1991 static inline void helper_ret_protected(CPUX86State
*env
, int shift
,
1992 int is_iret
, int addend
,
1995 uint32_t new_cs
, new_eflags
, new_ss
;
1996 uint32_t new_es
, new_ds
, new_fs
, new_gs
;
1997 uint32_t e1
, e2
, ss_e1
, ss_e2
;
1998 int cpl
, dpl
, rpl
, eflags_mask
, iopl
;
1999 target_ulong new_eip
, new_esp
;
2002 cpl
= env
->hflags
& HF_CPL_MASK
;
2006 sa
.mmu_index
= x86_mmu_index_pl(env
, cpl
);
2008 #ifdef TARGET_X86_64
2014 sa
.sp_mask
= get_sp_mask(env
->segs
[R_SS
].flags
);
2016 sa
.sp
= env
->regs
[R_ESP
];
2017 sa
.ss_base
= env
->segs
[R_SS
].base
;
2018 new_eflags
= 0; /* avoid warning */
2019 #ifdef TARGET_X86_64
2021 new_eip
= popq(&sa
);
2022 new_cs
= popq(&sa
) & 0xffff;
2024 new_eflags
= popq(&sa
);
2031 new_eip
= popl(&sa
);
2032 new_cs
= popl(&sa
) & 0xffff;
2034 new_eflags
= popl(&sa
);
2035 if (new_eflags
& VM_MASK
) {
2036 goto return_to_vm86
;
2041 new_eip
= popw(&sa
);
2044 new_eflags
= popw(&sa
);
2048 LOG_PCALL("lret new %04x:" TARGET_FMT_lx
" s=%d addend=0x%x\n",
2049 new_cs
, new_eip
, shift
, addend
);
2050 LOG_PCALL_STATE(env_cpu(env
));
2051 if ((new_cs
& 0xfffc) == 0) {
2052 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2054 if (load_segment_ra(env
, &e1
, &e2
, new_cs
, retaddr
) != 0) {
2055 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2057 if (!(e2
& DESC_S_MASK
) ||
2058 !(e2
& DESC_CS_MASK
)) {
2059 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2063 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2065 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2066 if (e2
& DESC_C_MASK
) {
2068 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2072 raise_exception_err_ra(env
, EXCP0D_GPF
, new_cs
& 0xfffc, retaddr
);
2075 if (!(e2
& DESC_P_MASK
)) {
2076 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_cs
& 0xfffc, retaddr
);
2080 if (rpl
== cpl
&& (!(env
->hflags
& HF_CS64_MASK
) ||
2081 ((env
->hflags
& HF_CS64_MASK
) && !is_iret
))) {
2082 /* return to same privilege level */
2083 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2084 get_seg_base(e1
, e2
),
2085 get_seg_limit(e1
, e2
),
2088 /* return to different privilege level */
2089 #ifdef TARGET_X86_64
2091 new_esp
= popq(&sa
);
2092 new_ss
= popq(&sa
) & 0xffff;
2098 new_esp
= popl(&sa
);
2099 new_ss
= popl(&sa
) & 0xffff;
2102 new_esp
= popw(&sa
);
2106 LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx
"\n",
2108 if ((new_ss
& 0xfffc) == 0) {
2109 #ifdef TARGET_X86_64
2110 /* NULL ss is allowed in long mode if cpl != 3 */
2111 /* XXX: test CS64? */
2112 if ((env
->hflags
& HF_LMA_MASK
) && rpl
!= 3) {
2113 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2115 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2116 DESC_S_MASK
| (rpl
<< DESC_DPL_SHIFT
) |
2117 DESC_W_MASK
| DESC_A_MASK
);
2118 ss_e2
= DESC_B_MASK
; /* XXX: should not be needed? */
2122 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, retaddr
);
2125 if ((new_ss
& 3) != rpl
) {
2126 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2128 if (load_segment_ra(env
, &ss_e1
, &ss_e2
, new_ss
, retaddr
) != 0) {
2129 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2131 if (!(ss_e2
& DESC_S_MASK
) ||
2132 (ss_e2
& DESC_CS_MASK
) ||
2133 !(ss_e2
& DESC_W_MASK
)) {
2134 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2136 dpl
= (ss_e2
>> DESC_DPL_SHIFT
) & 3;
2138 raise_exception_err_ra(env
, EXCP0D_GPF
, new_ss
& 0xfffc, retaddr
);
2140 if (!(ss_e2
& DESC_P_MASK
)) {
2141 raise_exception_err_ra(env
, EXCP0B_NOSEG
, new_ss
& 0xfffc, retaddr
);
2143 cpu_x86_load_seg_cache(env
, R_SS
, new_ss
,
2144 get_seg_base(ss_e1
, ss_e2
),
2145 get_seg_limit(ss_e1
, ss_e2
),
2149 cpu_x86_load_seg_cache(env
, R_CS
, new_cs
,
2150 get_seg_base(e1
, e2
),
2151 get_seg_limit(e1
, e2
),
2154 #ifdef TARGET_X86_64
2155 if (env
->hflags
& HF_CS64_MASK
) {
2160 sa
.sp_mask
= get_sp_mask(ss_e2
);
2163 /* validate data segments */
2164 validate_seg(env
, R_ES
, rpl
);
2165 validate_seg(env
, R_DS
, rpl
);
2166 validate_seg(env
, R_FS
, rpl
);
2167 validate_seg(env
, R_GS
, rpl
);
2171 SET_ESP(sa
.sp
, sa
.sp_mask
);
2174 /* NOTE: 'cpl' is the _old_ CPL */
2175 eflags_mask
= TF_MASK
| AC_MASK
| ID_MASK
| RF_MASK
| NT_MASK
;
2177 eflags_mask
|= IOPL_MASK
;
2179 iopl
= (env
->eflags
>> IOPL_SHIFT
) & 3;
2181 eflags_mask
|= IF_MASK
;
2184 eflags_mask
&= 0xffff;
2186 cpu_load_eflags(env
, new_eflags
, eflags_mask
);
2191 new_esp
= popl(&sa
);
2198 /* modify processor state */
2199 cpu_load_eflags(env
, new_eflags
, TF_MASK
| AC_MASK
| ID_MASK
|
2200 IF_MASK
| IOPL_MASK
| VM_MASK
| NT_MASK
| VIF_MASK
|
2202 load_seg_vm(env
, R_CS
, new_cs
& 0xffff);
2203 load_seg_vm(env
, R_SS
, new_ss
& 0xffff);
2204 load_seg_vm(env
, R_ES
, new_es
& 0xffff);
2205 load_seg_vm(env
, R_DS
, new_ds
& 0xffff);
2206 load_seg_vm(env
, R_FS
, new_fs
& 0xffff);
2207 load_seg_vm(env
, R_GS
, new_gs
& 0xffff);
2209 env
->eip
= new_eip
& 0xffff;
2210 env
->regs
[R_ESP
] = new_esp
;
2213 void helper_iret_protected(CPUX86State
*env
, int shift
, int next_eip
)
2215 int tss_selector
, type
;
2218 /* specific case for TSS */
2219 if (env
->eflags
& NT_MASK
) {
2220 #ifdef TARGET_X86_64
2221 if (env
->hflags
& HF_LMA_MASK
) {
2222 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2225 tss_selector
= cpu_lduw_kernel_ra(env
, env
->tr
.base
+ 0, GETPC());
2226 if (tss_selector
& 4) {
2227 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2229 if (load_segment_ra(env
, &e1
, &e2
, tss_selector
, GETPC()) != 0) {
2230 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2232 type
= (e2
>> DESC_TYPE_SHIFT
) & 0x17;
2233 /* NOTE: we check both segment and busy TSS */
2235 raise_exception_err_ra(env
, EXCP0A_TSS
, tss_selector
& 0xfffc, GETPC());
2237 switch_tss_ra(env
, tss_selector
, e1
, e2
, SWITCH_TSS_IRET
, next_eip
, GETPC());
2239 helper_ret_protected(env
, shift
, 1, 0, GETPC());
2241 env
->hflags2
&= ~HF2_NMI_MASK
;
2244 void helper_lret_protected(CPUX86State
*env
, int shift
, int addend
)
2246 helper_ret_protected(env
, shift
, 0, addend
, GETPC());
2249 void helper_sysenter(CPUX86State
*env
)
2251 if (env
->sysenter_cs
== 0) {
2252 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2254 env
->eflags
&= ~(VM_MASK
| IF_MASK
| RF_MASK
);
2256 #ifdef TARGET_X86_64
2257 if (env
->hflags
& HF_LMA_MASK
) {
2258 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2260 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2262 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2267 cpu_x86_load_seg_cache(env
, R_CS
, env
->sysenter_cs
& 0xfffc,
2269 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2271 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2273 cpu_x86_load_seg_cache(env
, R_SS
, (env
->sysenter_cs
+ 8) & 0xfffc,
2275 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2277 DESC_W_MASK
| DESC_A_MASK
);
2278 env
->regs
[R_ESP
] = env
->sysenter_esp
;
2279 env
->eip
= env
->sysenter_eip
;
2282 void helper_sysexit(CPUX86State
*env
, int dflag
)
2286 cpl
= env
->hflags
& HF_CPL_MASK
;
2287 if (env
->sysenter_cs
== 0 || cpl
!= 0) {
2288 raise_exception_err_ra(env
, EXCP0D_GPF
, 0, GETPC());
2290 #ifdef TARGET_X86_64
2292 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 32) & 0xfffc) |
2294 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2295 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2296 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
|
2298 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 40) & 0xfffc) |
2300 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2301 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2302 DESC_W_MASK
| DESC_A_MASK
);
2306 cpu_x86_load_seg_cache(env
, R_CS
, ((env
->sysenter_cs
+ 16) & 0xfffc) |
2308 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2309 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2310 DESC_CS_MASK
| DESC_R_MASK
| DESC_A_MASK
);
2311 cpu_x86_load_seg_cache(env
, R_SS
, ((env
->sysenter_cs
+ 24) & 0xfffc) |
2313 DESC_G_MASK
| DESC_B_MASK
| DESC_P_MASK
|
2314 DESC_S_MASK
| (3 << DESC_DPL_SHIFT
) |
2315 DESC_W_MASK
| DESC_A_MASK
);
2317 env
->regs
[R_ESP
] = env
->regs
[R_ECX
];
2318 env
->eip
= env
->regs
[R_EDX
];
2321 target_ulong
helper_lsl(CPUX86State
*env
, target_ulong selector1
)
2324 uint32_t e1
, e2
, selector
;
2325 int rpl
, dpl
, cpl
, type
;
2327 selector
= selector1
& 0xffff;
2328 assert(CC_OP
== CC_OP_EFLAGS
);
2329 if ((selector
& 0xfffc) == 0) {
2332 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2336 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2337 cpl
= env
->hflags
& HF_CPL_MASK
;
2338 if (e2
& DESC_S_MASK
) {
2339 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2342 if (dpl
< cpl
|| dpl
< rpl
) {
2347 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2358 if (dpl
< cpl
|| dpl
< rpl
) {
2364 limit
= get_seg_limit(e1
, e2
);
2369 target_ulong
helper_lar(CPUX86State
*env
, target_ulong selector1
)
2371 uint32_t e1
, e2
, selector
;
2372 int rpl
, dpl
, cpl
, type
;
2374 selector
= selector1
& 0xffff;
2375 assert(CC_OP
== CC_OP_EFLAGS
);
2376 if ((selector
& 0xfffc) == 0) {
2379 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2383 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2384 cpl
= env
->hflags
& HF_CPL_MASK
;
2385 if (e2
& DESC_S_MASK
) {
2386 if ((e2
& DESC_CS_MASK
) && (e2
& DESC_C_MASK
)) {
2389 if (dpl
< cpl
|| dpl
< rpl
) {
2394 type
= (e2
>> DESC_TYPE_SHIFT
) & 0xf;
2408 if (dpl
< cpl
|| dpl
< rpl
) {
2415 return e2
& 0x00f0ff00;
2418 void helper_verr(CPUX86State
*env
, target_ulong selector1
)
2420 uint32_t e1
, e2
, eflags
, selector
;
2423 selector
= selector1
& 0xffff;
2424 eflags
= cpu_cc_compute_all(env
) | CC_Z
;
2425 if ((selector
& 0xfffc) == 0) {
2428 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2431 if (!(e2
& DESC_S_MASK
)) {
2435 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2436 cpl
= env
->hflags
& HF_CPL_MASK
;
2437 if (e2
& DESC_CS_MASK
) {
2438 if (!(e2
& DESC_R_MASK
)) {
2441 if (!(e2
& DESC_C_MASK
)) {
2442 if (dpl
< cpl
|| dpl
< rpl
) {
2447 if (dpl
< cpl
|| dpl
< rpl
) {
2453 CC_OP
= CC_OP_EFLAGS
;
2456 void helper_verw(CPUX86State
*env
, target_ulong selector1
)
2458 uint32_t e1
, e2
, eflags
, selector
;
2461 selector
= selector1
& 0xffff;
2462 eflags
= cpu_cc_compute_all(env
) | CC_Z
;
2463 if ((selector
& 0xfffc) == 0) {
2466 if (load_segment_ra(env
, &e1
, &e2
, selector
, GETPC()) != 0) {
2469 if (!(e2
& DESC_S_MASK
)) {
2473 dpl
= (e2
>> DESC_DPL_SHIFT
) & 3;
2474 cpl
= env
->hflags
& HF_CPL_MASK
;
2475 if (e2
& DESC_CS_MASK
) {
2478 if (dpl
< cpl
|| dpl
< rpl
) {
2481 if (!(e2
& DESC_W_MASK
)) {
2487 CC_OP
= CC_OP_EFLAGS
;