/*
 * x86 segmentation related helpers:
 * TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "dyngen-exec.h"

#if !defined(CONFIG_USER_ONLY)
#include "softmmu_exec.h"
#endif /* !defined(CONFIG_USER_ONLY) */

#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
/* return non zero if error */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
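
/*
 * Worked example (illustrative only, not used by the code): for a flat
 * 4 GiB code descriptor the two descriptor words are e1 = 0x0000ffff and
 * e2 = 0x00cf9a00.  get_seg_base() then yields 0 and get_seg_limit()
 * yields 0xfffff, which the DESC_G_MASK branch scales to page
 * granularity: (0xfffff << 12) | 0xfff = 0xffffffff.
 */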
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(int seg, int selector)
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
    int type, index, shift;

    printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
    for (i = 0; i < env->tr.limit; i++) {
        printf("%02x ", env->tr.base[i]);

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(env, "invalid tss type");
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    *esp_ptr = lduw_kernel(env->tr.base + index);
    *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    *esp_ptr = ldl_kernel(env->tr.base + index);
    *ss_ptr = lduw_kernel(env->tr.base + index + 4);
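
/*
 * Example of the indexing above (illustrative only): in a 32-bit TSS
 * (shift == 1) the inner stack pointer for dpl == 0 sits at
 * index = (0 * 4 + 2) << 1 = 4, i.e. ESP0 is read from tss.base + 4 and
 * SS0 from tss.base + 8; for dpl == 1 the pair starts at offset 12, and
 * so on.
 */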
/* XXX: merge with load_seg() */
static void tss_load_seg(int seg_reg, int selector)
    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct? */
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err(env, EXCP0A_TSS, selector & 0xfffc);
#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,

    /* if task gate, we read the TSS segment and we load it */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err(env, EXCP0D_GPF, tss_selector & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, tss_selector & 0xfffc);

    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
        old_tss_limit_max = 103;
        old_tss_limit_max = 43;
    /* read all the registers from the new TSS */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for (i = 0; i < 6; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for (i = 0; i < 4; i++) {
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to validate the TLB after having done the accesses */
    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);
    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    old_eflags = cpu_compute_eflags(env);
    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for (i = 0; i < 6; i++) {
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for (i = 0; i < 4; i++) {
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    /* now if an exception occurs, it will occur in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;

    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);

    /* load all registers without an exception, then reload them with
       possible exception */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK |
            IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
        eflags_mask &= 0xffff;
    cpu_load_eflags(env, new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case? */

    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
        /* CPL is set to the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    env->ldt.selector = new_ldt & ~4;
        raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
    if ((new_ldt & 0xfffc) != 0) {
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err(env, EXCP0D_GPF, 0);

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1) {
                hw_breakpoint_remove(env, i);
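
/*
 * For reference, the TSS offsets used by switch_tss() above, taken
 * directly from its loads and stores: the 32-bit TSS keeps CR3 at 0x1c,
 * EIP at 0x20, EFLAGS at 0x24, EAX..EDI at 0x28 + i * 4, the segment
 * selectors at 0x48 + i * 4, the LDT selector at 0x60 and the trap word
 * at 0x64.  The 16-bit variant uses 0x0e (IP), 0x10 (FLAGS),
 * 0x12 + i * 2 (registers), 0x22 + i * 4 (selectors) and 0x2a (LDT).
 */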
static inline unsigned int get_sp_mask(unsigned int e2)
    if (e2 & DESC_B_MASK) {

static int exception_has_error_code(int intno)

#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                               \
    if ((sp_mask) == 0xffff) {                              \
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);           \
    } else if ((sp_mask) == 0xffffffffLL) {                 \
        ESP = (uint32_t)(val);                              \
#else
#define SET_ESP(val, sp_mask) \
    ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask)); \
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
#define PUSHW(ssp, sp, sp_mask, val)                             \
        stw_kernel((ssp) + (sp & (sp_mask)), (val));             \

#define PUSHL(ssp, sp, sp_mask, val)                             \
        stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val)); \

#define POPW(ssp, sp, sp_mask, val)                              \
        val = lduw_kernel((ssp) + (sp & (sp_mask)));             \

#define POPL(ssp, sp, sp_mask, val)                              \
        val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));  \
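
/*
 * A minimal sketch of how these helpers combine (assuming the usual
 * expansion of PUSHW into "decrement sp, then store"): pushing a word on a
 * 16-bit stack only moves the low 16 bits of the pointer, because the
 * offset is masked with sp_mask before being added to the segment base:
 *
 *     uint32_t sp = ESP;
 *     PUSHW(env->segs[R_SS].base, sp, 0xffff, value);
 *     SET_ESP(sp, 0xffff);   // only bits 15..0 of ESP change
 *
 * SEG_ADDL() plays the same role for 32-bit pushes: the segment base plus
 * masked offset is truncated to 32 bits so the linear address cannot
 * overflow on 64-bit hosts.
 */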
/* protected mode interrupt */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            if (env->segs[R_SS].flags & DESC_B_MASK) {
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
                stl_kernel(ssp, error_code);
                stw_kernel(ssp, error_code);
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK) {
        if (env->eflags & VM_MASK) {
            PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHL(ssp, esp, sp_mask, ESP);
        PUSHL(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        if (env->eflags & VM_MASK) {
            PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
            PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
        PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
        PUSHW(ssp, esp, sp_mask, ESP);
        PUSHW(ssp, esp, sp_mask, cpu_compute_eflags(env));
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);

        if (env->eflags & VM_MASK) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
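
/*
 * Stack frame produced above for an inner-privilege 32-bit gate (in push
 * order): SS and ESP of the interrupted context only when the stack
 * switches, then EFLAGS, CS, EIP and, for exceptions that define one, the
 * error code.  The 16-bit gate path pushes the same sequence as words,
 * and the vm86 path additionally saves GS, FS, DS and ES before clearing
 * them.
 */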
#define PUSHQ(sp, val)                          \
    stq_kernel(sp, (val));                      \

#define POPQ(sp, val)                           \
    val = ldq_kernel(sp);                       \

static inline target_ulong get_rsp_from_tss(int level)
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(env, "invalid tss");
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
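
/*
 * Example of the lookup above (illustrative only): index = 8 * level + 4,
 * so get_rsp_from_tss(0..2) reads RSP0..RSP2 at offsets 4, 12 and 20 of
 * the 64-bit TSS, and the "ist + 3" used by the interrupt path below maps
 * IST1 to offset 0x24.
 */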
/* 64 bit interrupt */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 16 + 2);
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
            esp = get_rsp_from_tss(ist + 3);
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            esp = get_rsp_from_tss(ist + 3);
        esp &= ~0xfLL; /* align stack */
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, cpu_compute_eflags(env));
    PUSHQ(esp, env->segs[R_CS].selector);
    if (has_error_code) {
        PUSHQ(esp, error_code);

        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
    cpu_x86_set_cpl(env, dpl);

    /* interrupt gate clears IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
#if defined(CONFIG_USER_ONLY)
void helper_syscall(int next_eip_addend)
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
void helper_syscall(int next_eip_addend)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        ECX = env->eip + next_eip_addend;
        env->regs[11] = cpu_compute_eflags(env);

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~env->fmask;
        cpu_load_eflags(env, env->eflags, 0);
            env->eip = env->lstar;
            env->eip = env->cstar;
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
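
/*
 * Note on the selector arithmetic above: SYSCALL takes the new CS from
 * STAR[47:32] and derives SS as that selector + 8, which is why R_SS is
 * loaded from (selector + 8) & 0xfffc; the 64-bit entry point comes from
 * LSTAR (or CSTAR for compatibility-mode callers) and the flag bits to
 * clear from FMASK.  This only restates what the loads above already do.
 */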
void helper_sysret(int dflag)
    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(env, EXCP06_ILLOP, 0);
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
        cpu_x86_set_cpl(env, 3);
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
/* real mode interrupt */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
    target_ulong ptr, ssp;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler!) */
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    ssp = env->segs[R_SS].base;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    PUSHW(ssp, esp, 0xffff, cpu_compute_eflags(env));
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
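
/*
 * Worked example (illustrative only): for vector 0x10 the real-mode path
 * above reads the 4-byte IVT entry at dt->base + 0x40 (a 16-bit offset
 * followed by a 16-bit segment selector), pushes FLAGS, CS and IP, and
 * continues at selector:offset with IF/TF/AC/RF cleared.
 */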
#if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
    int dpl, cpl, shift;

    if (env->hflags & HF_LMA_MASK) {
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
static void handle_even_inj(int intno, int is_int, int error_code,
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                                          control.event_inj));

    if (!(event_inj & SVM_EVTINJ_VALID)) {
            type = SVM_EVTINJ_TYPE_SOFT;
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exception_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb,
                                             control.event_inj_err),
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            log_cpu_state(env, X86_DUMP_CCOP);
            ptr = env->segs[R_CS].base + env->eip;
            for (i = 0; i < 16; i++) {
                qemu_log(" %02x", ldub(ptr + i));

    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK) {
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
        do_interrupt_real(intno, is_int, error_code, next_eip);

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb +
                                      offsetof(struct vmcb,
                                               control.event_inj));

        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                 event_inj & ~SVM_EVTINJ_VALID);
void do_interrupt(CPUX86State *env1)
    CPUX86State *saved_env;

#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;

void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
    CPUX86State *saved_env;

    do_interrupt_all(intno, 0, 0, 0, is_hw);
void helper_enter_level(int level, int data32, target_ulong t1)
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        stl(ssp + (esp & esp_mask), t1);
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        stw(ssp + (esp & esp_mask), t1);

#ifdef TARGET_X86_64
void helper_enter64_level(int level, int data64, target_ulong t1)
    target_ulong esp, ebp;

            stw(esp, lduw(ebp));
void helper_lldt(int selector)
    int index, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
    env->ldt.selector = selector;
void helper_ltr(int selector)
    int index, type, entry_limit;

    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        if (selector & 0x4) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    env->tr.selector = selector;
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(int seg_reg, int selector)
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
            raise_exception_err(env, EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
        if (selector & 0x4) {
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err(env, EXCP0C_STACK, selector & 0xfffc);
                raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            stl_kernel(ptr + 4, e2);

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
/* protected mode jump */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            /* non conforming code segment */
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            CC_OP = CC_OP_EFLAGS;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
            new_eip = (e1 & 0xffff);
                new_eip |= (e2 & 0xffff0000);
            if (load_segment(&e1, &e2, gate_cs) != 0) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
/* real mode call */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
    uint32_t esp, esp_mask;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);

    SET_ESP(esp, esp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
/* protected mode call */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            /* non conforming code segment */
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
            /* XXX: check 16/32 bit cases in long mode */
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0D_GPF, 0);

        if (load_segment(&e1, &e2, selector) != 0) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);

            /* push_size = ((param_count * 2) + 8) << shift; */
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for (i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
            /* to same privilege */
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);

        /* from this point, not restartable */
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   get_seg_limit(ss_e1, ss_e2),
        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
/* real and vm86 mode iret */
void helper_iret_real(int shift)
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size? */
    ssp = env->segs[R_SS].base;
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        POPL(ssp, sp, sp_mask, new_eflags);
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);

    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
        eflags_mask &= 0xffff;
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
static inline void validate_seg(int seg_reg, int cpl)
    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
/* protected mode iret */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
            POPQ(sp, new_eflags);
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
            POPL(ssp, sp, sp_mask, new_eflags);
            if (new_eflags & VM_MASK) {
                goto return_to_vm86;
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
            POPW(ssp, sp, sp_mask, new_eflags);
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
        raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
            raise_exception_err(env, EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, new_cs & 0xfffc);

    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        /* return to different privilege level */
#ifdef TARGET_X86_64
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
                raise_exception_err(env, EXCP0D_GPF, 0);
            if ((new_ss & 3) != rpl) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                raise_exception_err(env, EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err(env, EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
        cpu_x86_set_cpl(env, rpl);
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sp_mask = get_sp_mask(ss_e2);
        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);
    SET_ESP(sp, sp_mask);

        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
            eflags_mask |= IF_MASK;
            eflags_mask &= 0xffff;
        cpu_load_eflags(env, new_eflags, eflags_mask);

    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
void helper_iret_protected(int shift, int next_eip)
    int tss_selector, type;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err(env, EXCP0D_GPF, 0);
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0) {
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
            raise_exception_err(env, EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
        helper_ret_protected(shift, 1, 0);
    env->hflags2 &= ~HF2_NMI_MASK;

void helper_lret_protected(int shift, int addend)
    helper_ret_protected(shift, 0, addend);
void helper_sysenter(void)
    if (env->sysenter_cs == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;

void helper_sysexit(int dflag)
    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
target_ulong helper_lsl(target_ulong selector1)
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
    if (load_segment(&e1, &e2, selector) != 0) {
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    limit = get_seg_limit(e1, e2);
    CC_SRC = eflags | CC_Z;

target_ulong helper_lar(target_ulong selector1)
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
    if (load_segment(&e1, &e2, selector) != 0) {
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    CC_SRC = eflags | CC_Z;
    return e2 & 0x00f0ff00;

void helper_verr(target_ulong selector1)
    uint32_t e1, e2, eflags, selector;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
    if (load_segment(&e1, &e2, selector) != 0) {
    if (!(e2 & DESC_S_MASK)) {
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
        if (dpl < cpl || dpl < rpl) {
            CC_SRC = eflags & ~CC_Z;
    CC_SRC = eflags | CC_Z;

void helper_verw(target_ulong selector1)
    uint32_t e1, e2, eflags, selector;

    selector = selector1 & 0xffff;
    eflags = helper_cc_compute_all(CC_OP);
    if ((selector & 0xfffc) == 0) {
    if (load_segment(&e1, &e2, selector) != 0) {
    if (!(e2 & DESC_S_MASK)) {
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (dpl < cpl || dpl < rpl) {
        if (!(e2 & DESC_W_MASK)) {
            CC_SRC = eflags & ~CC_Z;
    CC_SRC = eflags | CC_Z;
#if defined(CONFIG_USER_ONLY)
void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
    CPUX86State *saved_env;

    if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               (selector << 4), 0xffff, 0);
        helper_load_seg(seg_reg, selector);