/*
 *  x86 segmentation related helpers:
 *  TSS, interrupts, system calls, jumps and call/task gates, descriptors
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "qemu/log.h"
#include "exec/helper-proto.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "exec/log.h"
#include "helper-tcg.h"
#include "seg_helper.h"
#include "access.h"
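
/*
 * SET_ESP commits a stack pointer value back to env->regs[R_ESP] while
 * preserving the bits that the current stack width does not cover: a
 * 16-bit stack updates only SP, a 32-bit stack zero-extends into ESP,
 * and (on TARGET_X86_64) a full-width mask stores RSP as-is.
 */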
#ifdef TARGET_X86_64
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        if ((sp_mask) == 0xffff) {                              \
            env->regs[R_ESP] = (env->regs[R_ESP] & ~0xffff) |   \
                ((val) & 0xffff);                               \
        } else if ((sp_mask) == 0xffffffffLL) {                 \
            env->regs[R_ESP] = (uint32_t)(val);                 \
        } else {                                                \
            env->regs[R_ESP] = (val);                           \
        }                                                       \
    } while (0)
#else
#define SET_ESP(val, sp_mask)                                   \
    do {                                                        \
        env->regs[R_ESP] = (env->regs[R_ESP] & ~(sp_mask)) |    \
            ((val) & (sp_mask));                                \
    } while (0)
#endif

/* XXX: use mmu_index to have proper DPL support */
typedef struct StackAccess
{
    CPUX86State *env;
    uintptr_t ra;
    target_ulong ss_base;
    target_ulong sp;
    target_ulong sp_mask;
    int mmu_index;
} StackAccess;
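
/*
 * The push/pop helpers below operate on a StackAccess: they update only
 * the local copy sa->sp, so a sequence of pushes is committed to the
 * architectural ESP in one go (via SET_ESP) only after all the memory
 * accesses have succeeded.
 */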
static void pushw(StackAccess *sa, uint16_t val)
{
    sa->sp -= 2;
    cpu_stw_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static void pushl(StackAccess *sa, uint32_t val)
{
    sa->sp -= 4;
    cpu_stl_mmuidx_ra(sa->env, sa->ss_base + (sa->sp & sa->sp_mask),
                      val, sa->mmu_index, sa->ra);
}

static uint16_t popw(StackAccess *sa)
{
    uint16_t ret = cpu_lduw_mmuidx_ra(sa->env,
                                      sa->ss_base + (sa->sp & sa->sp_mask),
                                      sa->mmu_index, sa->ra);
    sa->sp += 2;
    return ret;
}

static uint32_t popl(StackAccess *sa)
{
    uint32_t ret = cpu_ldl_mmuidx_ra(sa->env,
                                     sa->ss_base + (sa->sp & sa->sp_mask),
                                     sa->mmu_index, sa->ra);
    sa->sp += 4;
    return ret;
}
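
/* Summarize the CR0/CR4/EFER paging controls into a PG_MODE_* bit set. */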
int get_pg_mode(CPUX86State *env)
{
    int pg_mode = 0;
    if (!(env->cr[0] & CR0_PG_MASK)) {
        return 0;
    }
    if (env->cr[0] & CR0_WP_MASK) {
        pg_mode |= PG_MODE_WP;
    }
    if (env->cr[4] & CR4_PAE_MASK) {
        pg_mode |= PG_MODE_PAE;
        if (env->efer & MSR_EFER_NXE) {
            pg_mode |= PG_MODE_NXE;
        }
    }
    if (env->cr[4] & CR4_PSE_MASK) {
        pg_mode |= PG_MODE_PSE;
    }
    if (env->cr[4] & CR4_SMEP_MASK) {
        pg_mode |= PG_MODE_SMEP;
    }
    if (env->hflags & HF_LMA_MASK) {
        pg_mode |= PG_MODE_LMA;
        if (env->cr[4] & CR4_PKE_MASK) {
            pg_mode |= PG_MODE_PKE;
        }
        if (env->cr[4] & CR4_PKS_MASK) {
            pg_mode |= PG_MODE_PKS;
        }
        if (env->cr[4] & CR4_LA57_MASK) {
            pg_mode |= PG_MODE_LA57;
        }
    }
    return pg_mode;
}

/* return non-zero if error */
static inline int load_segment_ra(CPUX86State *env, uint32_t *e1_ptr,
                                  uint32_t *e2_ptr, int selector,
                                  uintptr_t retaddr)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    if (selector & 0x4) {
        dt = &env->ldt;
    } else {
        dt = &env->gdt;
    }
    index = selector & ~7;
    if ((index + 7) > dt->limit) {
        return -1;
    }
    ptr = dt->base + index;
    *e1_ptr = cpu_ldl_kernel_ra(env, ptr, retaddr);
    *e2_ptr = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
    return 0;
}

static inline int load_segment(CPUX86State *env, uint32_t *e1_ptr,
                               uint32_t *e2_ptr, int selector)
{
    return load_segment_ra(env, e1_ptr, e2_ptr, selector, 0);
}
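
/*
 * In the two descriptor words, e1 holds limit[15:0] and base[15:0];
 * e2 holds base[23:16], the attribute bits together with limit[19:16],
 * and base[31:24].  With the granularity bit set the 20-bit limit is
 * in 4K units, hence the << 12 | 0xfff below.
 */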
static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
{
    unsigned int limit;

    limit = (e1 & 0xffff) | (e2 & 0x000f0000);
    if (e2 & DESC_G_MASK) {
        limit = (limit << 12) | 0xfff;
    }
    return limit;
}

static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    return (e1 >> 16) | ((e2 & 0xff) << 16) | (e2 & 0xff000000);
}

static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1,
                                         uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}

/* init the segment cache in vm86 mode. */
static inline void load_seg_vm(CPUX86State *env, int seg, int selector)
{
    selector &= 0xffff;

    cpu_x86_load_seg_cache(env, seg, selector, (selector << 4), 0xffff,
                           DESC_P_MASK | DESC_S_MASK | DESC_W_MASK |
                           DESC_A_MASK | (3 << DESC_DPL_SHIFT));
}
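
/*
 * Fetch the inner-level stack pointer for privilege level 'dpl' from
 * the current TSS: a 32-bit TSS keeps SS:ESP pairs at offset 8*dpl+4,
 * a 16-bit TSS keeps SS:SP pairs at offset 4*dpl+2.
 */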
static inline void get_ss_esp_from_tss(CPUX86State *env, uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl,
                                       uintptr_t retaddr)
{
    X86CPU *cpu = env_archcpu(env);
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for (i = 0; i < env->tr.limit; i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) {
                printf("\n");
            }
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1) {
        cpu_abort(CPU(cpu), "invalid tss type");
    }
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit) {
        raise_exception_err_ra(env, EXCP0A_TSS, env->tr.selector & 0xfffc, retaddr);
    }
    if (shift == 0) {
        *esp_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 2, retaddr);
    } else {
        *esp_ptr = cpu_ldl_kernel_ra(env, env->tr.base + index, retaddr);
        *ss_ptr = cpu_lduw_kernel_ra(env, env->tr.base + index + 4, retaddr);
    }
}

static void tss_load_seg(CPUX86State *env, X86Seg seg_reg, int selector,
                         int cpl, uintptr_t retaddr)
{
    uint32_t e1, e2;
    int rpl, dpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment_ra(env, &e1, &e2, selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            if (dpl != cpl || dpl != rpl) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
        } else {
            /* reject non-readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK)) {
                raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
            }
            /* if data or non-conforming code, check the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
                }
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, retaddr);
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        if (seg_reg == R_SS || seg_reg == R_CS) {
            raise_exception_err_ra(env, EXCP0A_TSS, selector & 0xfffc, retaddr);
        }
    }
}

static void tss_set_busy(CPUX86State *env, int tss_selector, bool value,
                         uintptr_t retaddr)
{
    target_ulong ptr = env->gdt.base + (tss_selector & ~7);
    uint32_t e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);

    if (value) {
        e2 |= DESC_TSS_BUSY_MASK;
    } else {
        e2 &= ~DESC_TSS_BUSY_MASK;
    }

    cpu_stl_kernel_ra(env, ptr + 4, e2, retaddr);
}

#define SWITCH_TSS_JMP  0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2
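
/*
 * Task switch outline: validate the new TSS descriptor, save the
 * outgoing register state into the current TSS, read the incoming
 * state from the new TSS, update the busy bits and (for CALL) the
 * back link and NT flag, then reload CR3, EFLAGS, the general
 * registers, the LDT and the segment registers from the new task.
 */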
/* return 0 if switching to a 16-bit selector */
static int switch_tss_ra(CPUX86State *env, int tss_selector,
                         uint32_t e1, uint32_t e2, int source,
                         uint32_t next_eip, uintptr_t retaddr)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int mmu_index, index;
    target_ulong ptr;
    X86Access old, new;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type,
              source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
        }
        tss_selector = e1 >> 16;
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, retaddr) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        if (e2 & DESC_S_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1) {
            raise_exception_err_ra(env, EXCP0D_GPF, tss_selector & 0xfffc, retaddr);
        }
    }

    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, tss_selector & 0xfffc, retaddr);
    }

    if (type & 8) {
        tss_limit_max = 103;
    } else {
        tss_limit_max = 43;
    }
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8) {
        old_tss_limit_max = 103;
    } else {
        old_tss_limit_max = 43;
    }

    /* new TSS must be busy iff the source is an IRET instruction */
    if (!!(e2 & DESC_TSS_BUSY_MASK) != (source == SWITCH_TSS_IRET)) {
        raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, retaddr);
    }

    /* X86Access avoids memory exceptions during the task switch */
    mmu_index = cpu_mmu_index_kernel(env);
    access_prepare_mmu(&old, env, env->tr.base, old_tss_limit_max + 1,
                       MMU_DATA_STORE, mmu_index, retaddr);

    if (source == SWITCH_TSS_CALL) {
        /* Probe for future write of parent task */
        probe_access(env, tss_base, 2, MMU_DATA_STORE,
                     mmu_index, retaddr);
    }
    /* While true tss_limit may be larger, we don't access the iopb here. */
    access_prepare_mmu(&new, env, tss_base, tss_limit_max + 1,
                       MMU_DATA_LOAD, mmu_index, retaddr);
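
    /*
     * 32-bit TSS layout used below: CR3 at 0x1c, EIP at 0x20, EFLAGS at
     * 0x24, EAX..EDI at 0x28..0x44, ES..GS selectors at 0x48..0x5c, the
     * LDT selector at 0x60 and the trap flag/IOPB word at 0x64.  The
     * 16-bit TSS packs the same data in 2-byte slots starting at 0x0e.
     */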
    /* save the current state in the old TSS */
    old_eflags = cpu_compute_eflags(env);
    if (old_type & 8) {
        /* 32 bit */
        access_stl(&old, env->tr.base + 0x20, next_eip);
        access_stl(&old, env->tr.base + 0x24, old_eflags);
        access_stl(&old, env->tr.base + (0x28 + 0 * 4), env->regs[R_EAX]);
        access_stl(&old, env->tr.base + (0x28 + 1 * 4), env->regs[R_ECX]);
        access_stl(&old, env->tr.base + (0x28 + 2 * 4), env->regs[R_EDX]);
        access_stl(&old, env->tr.base + (0x28 + 3 * 4), env->regs[R_EBX]);
        access_stl(&old, env->tr.base + (0x28 + 4 * 4), env->regs[R_ESP]);
        access_stl(&old, env->tr.base + (0x28 + 5 * 4), env->regs[R_EBP]);
        access_stl(&old, env->tr.base + (0x28 + 6 * 4), env->regs[R_ESI]);
        access_stl(&old, env->tr.base + (0x28 + 7 * 4), env->regs[R_EDI]);
        for (i = 0; i < 6; i++) {
            access_stw(&old, env->tr.base + (0x48 + i * 4),
                       env->segs[i].selector);
        }
    } else {
        /* 16 bit */
        access_stw(&old, env->tr.base + 0x0e, next_eip);
        access_stw(&old, env->tr.base + 0x10, old_eflags);
        access_stw(&old, env->tr.base + (0x12 + 0 * 2), env->regs[R_EAX]);
        access_stw(&old, env->tr.base + (0x12 + 1 * 2), env->regs[R_ECX]);
        access_stw(&old, env->tr.base + (0x12 + 2 * 2), env->regs[R_EDX]);
        access_stw(&old, env->tr.base + (0x12 + 3 * 2), env->regs[R_EBX]);
        access_stw(&old, env->tr.base + (0x12 + 4 * 2), env->regs[R_ESP]);
        access_stw(&old, env->tr.base + (0x12 + 5 * 2), env->regs[R_EBP]);
        access_stw(&old, env->tr.base + (0x12 + 6 * 2), env->regs[R_ESI]);
        access_stw(&old, env->tr.base + (0x12 + 7 * 2), env->regs[R_EDI]);
        for (i = 0; i < 4; i++) {
            access_stw(&old, env->tr.base + (0x22 + i * 2),
                       env->segs[i].selector);
        }
    }

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = access_ldl(&new, tss_base + 0x1c);
        new_eip = access_ldl(&new, tss_base + 0x20);
        new_eflags = access_ldl(&new, tss_base + 0x24);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldl(&new, tss_base + (0x28 + i * 4));
        }
        for (i = 0; i < 6; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x48 + i * 4));
        }
        new_ldt = access_ldw(&new, tss_base + 0x60);
        new_trap = access_ldl(&new, tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = access_ldw(&new, tss_base + 0x0e);
        new_eflags = access_ldw(&new, tss_base + 0x10);
        for (i = 0; i < 8; i++) {
            new_regs[i] = access_ldw(&new, tss_base + (0x12 + i * 2));
        }
        for (i = 0; i < 4; i++) {
            new_segs[i] = access_ldw(&new, tss_base + (0x22 + i * 2));
        }
        new_ldt = access_ldw(&new, tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        tss_set_busy(env, env->tr.selector, 0, retaddr);
    }

    if (source == SWITCH_TSS_IRET) {
        old_eflags &= ~NT_MASK;
        if (old_type & 8) {
            access_stl(&old, env->tr.base + 0x24, old_eflags);
        } else {
            access_stw(&old, env->tr.base + 0x10, old_eflags);
        }
    }

    if (source == SWITCH_TSS_CALL) {
        /*
         * Thanks to the probe_access above, we know the first two
         * bytes addressed by &new are writable too.
         */
        access_stw(&new, tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        tss_set_busy(env, tss_selector, 1, retaddr);
    }

    /* set the new CPU state */

    /* now if an exception occurs, it will occur in the next task context */

    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (type & 8) {
        cpu_load_eflags(env, new_eflags, eflags_mask);
        for (i = 0; i < 8; i++) {
            env->regs[i] = new_regs[i];
        }
    } else {
        cpu_load_eflags(env, new_eflags, eflags_mask & 0xffff);
        for (i = 0; i < 8; i++) {
            env->regs[i] = (env->regs[i] & 0xffff0000) | new_regs[i];
        }
    }
    if (new_eflags & VM_MASK) {
        for (i = 0; i < 6; i++) {
            load_seg_vm(env, i, new_segs[i]);
        }
    } else {
        /* first just selectors as the rest may trigger exceptions */
        for (i = 0; i < 6; i++) {
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
        }
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4) {
        raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
    }

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, retaddr);
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, retaddr);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0A_TSS, new_ldt & 0xfffc, retaddr);
        }
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        int cpl = new_segs[R_CS] & 3;
        tss_load_seg(env, R_CS, new_segs[R_CS], cpl, retaddr);
        tss_load_seg(env, R_SS, new_segs[R_SS], cpl, retaddr);
        tss_load_seg(env, R_ES, new_segs[R_ES], cpl, retaddr);
        tss_load_seg(env, R_DS, new_segs[R_DS], cpl, retaddr);
        tss_load_seg(env, R_FS, new_segs[R_FS], cpl, retaddr);
        tss_load_seg(env, R_GS, new_segs[R_GS], cpl, retaddr);
    }

    /* check that env->eip is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL? */
        raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & DR7_LOCAL_BP_MASK) {
        cpu_x86_update_dr7(env, env->dr[7] & ~DR7_LOCAL_BP_MASK);
    }
#endif
    return type >> 3;
}

static int switch_tss(CPUX86State *env, int tss_selector,
                      uint32_t e1, uint32_t e2, int source,
                      uint32_t next_eip)
{
    return switch_tss_ra(env, tss_selector, e1, e2, source, next_eip, 0);
}
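
/*
 * Mask selecting the valid stack-pointer bits for the stack segment
 * described by e2: 0xffff for a 16-bit stack, 0xffffffff when the B
 * bit is set.  For a 64-bit (L) segment it returns 0; the 64-bit code
 * paths in this file use a flat -1 mask instead.
 */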
static inline unsigned int get_sp_mask(unsigned int e2)
{
#ifdef TARGET_X86_64
    if (e2 & DESC_L_MASK) {
        return 0;
    } else
#endif
    if (e2 & DESC_B_MASK) {
        return 0xffffffff;
    } else {
        return 0xffff;
    }
}

static int exception_is_fault(int intno)
{
    switch (intno) {
        /*
         * #DB can be both fault- and trap-like, but it never sets RF=1
         * in the RFLAGS value pushed on the stack.
         */
    case EXCP01_DB:
    case EXCP03_INT3:
    case EXCP04_INTO:
    case EXCP08_DBLE:
    case EXCP12_MCHK:
        return 0;
    }
    /* Everything else including reserved exception is a fault. */
    return 1;
}

int exception_has_error_code(int intno)
{
    switch (intno) {
    case 8:
    case 10:
    case 11:
    case 12:
    case 13:
    case 14:
    case 17:
        return 1;
    }
    return 0;
}
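
/*
 * The error-code vectors above are #DF(8), #TS(10), #NP(11), #SS(12),
 * #GP(13), #PF(14) and #AC(17); all other exceptions, and all
 * interrupts, push no error code.
 */
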
/* protected mode interrupt */
static void do_interrupt_protected(CPUX86State *env, int intno, int is_int,
                                   int error_code, unsigned int next_eip,
                                   int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, eflags;
    int vm86 = env->eflags & VM_MASK;
    StackAccess sa;
    bool set_rf;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 8 + 7 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 8;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 5: /* task gate */
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (type == 5) {
        /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
        }
        shift = switch_tss(env, intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            /* push the error code */
            if (env->segs[R_SS].flags & DESC_B_MASK) {
                sa.sp_mask = 0xffffffff;
            } else {
                sa.sp_mask = 0xffff;
            }
            sa.sp = env->regs[R_ESP];
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, error_code);
            } else {
                pushw(&sa, error_code);
            }
            SET_ESP(sa.sp, sa.sp_mask);
        }
        return;
    }

    /* Otherwise, trap or interrupt gate */

    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }
    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }
    if (dpl < cpl) {
        /* to inner privilege */
        uint32_t esp;
        get_ss_esp_from_tss(env, &ss, &esp, dpl, 0);
        if ((ss & 0xfffc) == 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if ((ss & 3) != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (load_segment(env, &ss_e1, &ss_e2, ss) != 0) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        if (!(ss_e2 & DESC_P_MASK)) {
            raise_exception_err(env, EXCP0A_TSS, ss & 0xfffc);
        }
        new_stack = 1;
        sa.sp = esp;
        sa.sp_mask = get_sp_mask(ss_e2);
        sa.ss_base = get_seg_base(ss_e1, ss_e2);
    } else {
        /* to same privilege */
        if (vm86) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
        sa.ss_base = env->segs[R_SS].base;
    }

    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (vm86) {
        push_size += 8;
    }
    push_size <<= shift;
#endif

    eflags = cpu_compute_eflags(env);
    /*
     * AMD states that code breakpoint #DBs clear RF; Intel leaves it
     * as is.  AMD behavior could be implemented in check_hw_breakpoints().
     */
    if (set_rf) {
        eflags |= RF_MASK;
    }

    if (shift == 1) {
        if (new_stack) {
            if (vm86) {
                pushl(&sa, env->segs[R_GS].selector);
                pushl(&sa, env->segs[R_FS].selector);
                pushl(&sa, env->segs[R_DS].selector);
                pushl(&sa, env->segs[R_ES].selector);
            }
            pushl(&sa, env->segs[R_SS].selector);
            pushl(&sa, env->regs[R_ESP]);
        }
        pushl(&sa, eflags);
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, old_eip);
        if (has_error_code) {
            pushl(&sa, error_code);
        }
    } else {
        if (new_stack) {
            if (vm86) {
                pushw(&sa, env->segs[R_GS].selector);
                pushw(&sa, env->segs[R_FS].selector);
                pushw(&sa, env->segs[R_DS].selector);
                pushw(&sa, env->segs[R_ES].selector);
            }
            pushw(&sa, env->segs[R_SS].selector);
            pushw(&sa, env->regs[R_ESP]);
        }
        pushw(&sa, eflags);
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, old_eip);
        if (has_error_code) {
            pushw(&sa, error_code);
        }
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        if (vm86) {
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, sa.ss_base,
                               get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(sa.sp, sa.sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}

#ifdef TARGET_X86_64

static void pushq(StackAccess *sa, uint64_t val)
{
    sa->sp -= 8;
    cpu_stq_mmuidx_ra(sa->env, sa->sp, val, sa->mmu_index, sa->ra);
}

static uint64_t popq(StackAccess *sa)
{
    uint64_t ret = cpu_ldq_mmuidx_ra(sa->env, sa->sp, sa->mmu_index, sa->ra);
    sa->sp += 8;
    return ret;
}
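
/*
 * In the 64-bit TSS, RSP0..RSP2 live at offsets 4, 12 and 20 and
 * IST1..IST7 at offsets 36..84, which is why both are addressed below
 * as 8 * level + 4 (IST n is passed in as level n + 3).
 */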
static inline target_ulong get_rsp_from_tss(CPUX86State *env, int level)
{
    X86CPU *cpu = env_archcpu(env);
    int index, pg_mode;
    target_ulong rsp;
    int32_t sext;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK)) {
        cpu_abort(CPU(cpu), "invalid tss");
    }
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit) {
        raise_exception_err(env, EXCP0A_TSS, env->tr.selector & 0xfffc);
    }

    rsp = cpu_ldq_kernel(env, env->tr.base + index);

    /* test virtual address sign extension */
    pg_mode = get_pg_mode(env);
    sext = (int64_t)rsp >> (pg_mode & PG_MODE_LA57 ? 56 : 47);
    if (sext != 0 && sext != -1) {
        raise_exception_err(env, EXCP0C_STACK, 0);
    }

    return rsp;
}
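
/*
 * Long-mode IDT entries are 16 bytes: e1/e2 have the same layout as a
 * 32-bit gate, e3 holds offset[63:32], and the low three bits of e2
 * select an IST stack.
 */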
/* 64 bit interrupt */
static void do_interrupt64(CPUX86State *env, int intno, int is_int,
                           int error_code, target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, eflags;
    target_ulong old_eip, offset;
    bool set_rf;
    StackAccess sa;

    has_error_code = 0;
    if (!is_int && !is_hw) {
        has_error_code = exception_has_error_code(intno);
    }
    if (is_int) {
        old_eip = next_eip;
        set_rf = false;
    } else {
        old_eip = env->eip;
        set_rf = exception_is_fault(intno);
    }

    dt = &env->idt;
    if (intno * 16 + 15 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 16;
    e1 = cpu_ldl_kernel(env, ptr);
    e2 = cpu_ldl_kernel(env, ptr + 4);
    e3 = cpu_ldl_kernel(env, ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch (type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    /* check valid bit */
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, intno * 8 + 2);
    }
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0) {
        raise_exception_err(env, EXCP0D_GPF, 0);
    }

    if (load_segment(env, &e1, &e2, selector) != 0) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err(env, EXCP0B_NOSEG, selector & 0xfffc);
    }
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK)) {
        raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
    }
    if (e2 & DESC_C_MASK) {
        dpl = cpl;
    }

    sa.env = env;
    sa.ra = 0;
    sa.mmu_index = cpu_mmu_index_kernel(env);
    sa.sp_mask = -1;
    sa.ss_base = 0;
    if (dpl < cpl || ist != 0) {
        /* to inner privilege */
        new_stack = 1;
        sa.sp = get_rsp_from_tss(env, ist != 0 ? ist + 3 : dpl);
    } else {
        /* to same privilege */
        if (env->eflags & VM_MASK) {
            raise_exception_err(env, EXCP0D_GPF, selector & 0xfffc);
        }
        new_stack = 0;
        sa.sp = env->regs[R_ESP];
    }
    sa.sp &= ~0xfLL; /* align stack */

    /* See do_interrupt_protected. */
    eflags = cpu_compute_eflags(env);
    if (set_rf) {
        eflags |= RF_MASK;
    }

    pushq(&sa, env->segs[R_SS].selector);
    pushq(&sa, env->regs[R_ESP]);
    pushq(&sa, eflags);
    pushq(&sa, env->segs[R_CS].selector);
    pushq(&sa, old_eip);
    if (has_error_code) {
        pushq(&sa, error_code);
    }

    /* interrupt gates clear the IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);

    if (new_stack) {
        uint32_t ss = 0 | dpl; /* SS = NULL selector with RPL = new CPL */
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, dpl << DESC_DPL_SHIFT);
    }
    env->regs[R_ESP] = sa.sp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                           get_seg_base(e1, e2),
                           get_seg_limit(e1, e2),
                           e2);
    env->eip = offset;
}
#endif /* TARGET_X86_64 */
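
/*
 * SYSRET derives its target selectors from MSR_STAR[63:48]: CS is that
 * value (+16 for a 64-bit return, selecting a long-mode code segment)
 * and SS is always that value + 8, both with RPL forced to 3.
 */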
void helper_sysret(CPUX86State *env, int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err_ra(env, EXCP06_ILLOP, 0, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    selector = (env->star >> 48) & 0xffff;
#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_load_eflags(env, (uint32_t)(env->regs[11]), TF_MASK | AC_MASK
                        | ID_MASK | IF_MASK | IOPL_MASK | VM_MASK | RF_MASK |
                        NT_MASK);
        if (dflag == 2) {
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = env->regs[R_ECX];
        } else {
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)env->regs[R_ECX];
        }
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        env->eflags |= IF_MASK;
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)env->regs[R_ECX];
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
}

/* real mode interrupt */
static void do_interrupt_real(CPUX86State *env, int intno, int is_int,
                              int error_code, unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int selector;
    uint32_t offset;
    uint32_t old_cs, old_eip;
    StackAccess sa;

    /* real mode (simpler!) */
    dt = &env->idt;
    if (intno * 4 + 3 > dt->limit) {
        raise_exception_err(env, EXCP0D_GPF, intno * 8 + 2);
    }
    ptr = dt->base + intno * 4;
    offset = cpu_lduw_kernel(env, ptr);
    selector = cpu_lduw_kernel(env, ptr + 2);

    sa.env = env;
    sa.ra = 0;
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = 0xffff;
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (is_int) {
        old_eip = next_eip;
    } else {
        old_eip = env->eip;
    }
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size? */
    pushw(&sa, cpu_compute_eflags(env));
    pushw(&sa, old_cs);
    pushw(&sa, old_eip);

    /* update processor state */
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}

/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the env->eip value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
void do_interrupt_all(X86CPU *cpu, int intno, int is_int,
                      int error_code, target_ulong next_eip, int is_hw)
{
    CPUX86State *env = &cpu->env;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;

            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx
                     " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, env->eip,
                     (int)env->segs[R_CS].base + env->eip,
                     env->segs[R_SS].selector, env->regs[R_ESP]);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" env->regs[R_EAX]=" TARGET_FMT_lx, env->regs[R_EAX]);
            }
            qemu_log("\n");
            log_cpu_state(CPU(cpu), CPU_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;

                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for (i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 0);
        }
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(env, intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(env, intno, is_int, error_code, next_eip,
                                   is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_GUEST_MASK) {
            handle_even_inj(env, intno, is_int, error_code, is_hw, 1);
        }
#endif
        do_interrupt_real(env, intno, is_int, error_code, next_eip);
    }

#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_GUEST_MASK) {
        CPUState *cs = CPU(cpu);
        uint32_t event_inj = x86_ldl_phys(cs, env->vm_vmcb +
                                          offsetof(struct vmcb,
                                                   control.event_inj));

        x86_stl_phys(cs,
                     env->vm_vmcb + offsetof(struct vmcb, control.event_inj),
                     event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}

void do_interrupt_x86_hardirq(CPUX86State *env, int intno, int is_hw)
{
    do_interrupt_all(env_archcpu(env), intno, 0, 0, 0, is_hw);
}

void helper_lldt(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}

void helper_ltr(CPUX86State *env, int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        if (selector & 0x4) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            entry_limit = 15;
        } else
#endif
        {
            entry_limit = 7;
        }
        if ((index + entry_limit) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;

            e3 = cpu_ldl_kernel_ra(env, ptr + 8, GETPC());
            e4 = cpu_ldl_kernel_ra(env, ptr + 12, GETPC());
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        e2 |= DESC_TSS_BUSY_MASK;
        cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
    }
    env->tr.selector = selector;
}

/* only works if protected mode and not VM86. seg_reg must be != R_CS */
void helper_load_seg(CPUX86State *env, int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            ) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4) {
            dt = &env->ldt;
        } else {
            dt = &env->gdt;
        }
        index = selector & ~7;
        if ((index + 7) > dt->limit) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        ptr = dt->base + index;
        e1 = cpu_ldl_kernel_ra(env, ptr, GETPC());
        e2 = cpu_ldl_kernel_ra(env, ptr + 4, GETPC());

        if (!(e2 & DESC_S_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (rpl != cpl || dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl) {
                    raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
                }
            }
        }

        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS) {
                raise_exception_err_ra(env, EXCP0C_STACK, selector & 0xfffc, GETPC());
            } else {
                raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
            }
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            cpu_stl_kernel_ra(env, ptr + 4, e2, GETPC());
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}

/* protected mode jump */
void helper_ljmp_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                           target_ulong next_eip)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;

    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        limit = get_seg_limit(e1, e2);
        if (new_eip > limit &&
            (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                               get_seg_base(e1, e2), limit, e2);
        env->eip = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif
        switch (type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_JMP, next_eip, GETPC());
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
            }
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12) {
                new_eip |= (e2 & 0xffff0000);
            }

#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                /* load the upper 8 bytes of the 64-bit call gate */
                if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
                if (type != 0) {
                    raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                           GETPC());
                }
                new_eip |= ((target_ulong)e1) << 32;
            }
#endif

            if (load_segment_ra(env, &e1, &e2, gate_cs, GETPC()) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl))) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
#ifdef TARGET_X86_64
            if (env->efer & MSR_EFER_LMA) {
                if (!(e2 & DESC_L_MASK)) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
                if (e2 & DESC_B_MASK) {
                    raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
                }
            }
#endif
            if (!(e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, gate_cs & 0xfffc, GETPC());
            }
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit &&
                (!(env->hflags & HF_LMA_MASK) || !(e2 & DESC_L_MASK))) {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
            }
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
    }
}

/* real mode call */
void helper_lcall_real(CPUX86State *env, uint32_t new_cs, uint32_t new_eip,
                       int shift, uint32_t next_eip)
{
    StackAccess sa;

    sa.env = env;
    sa.ra = GETPC();
    sa.sp = env->regs[R_ESP];
    sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sa.ss_base = env->segs[R_SS].base;
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (shift) {
        pushl(&sa, env->segs[R_CS].selector);
        pushl(&sa, next_eip);
    } else {
        pushw(&sa, env->segs[R_CS].selector);
        pushw(&sa, next_eip);
    }

    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}

/* protected mode call */
void helper_lcall_protected(CPUX86State *env, int new_cs, target_ulong new_eip,
                            int shift, target_ulong next_eip)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, type, ss_dpl;
    uint32_t val, limit, old_sp_mask;
    target_ulong old_ssp, offset;
    StackAccess sa;

    LOG_PCALL("lcall %04x:" TARGET_FMT_lx " s=%d\n", new_cs, new_eip, shift);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, GETPC()) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
    }
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);

    sa.env = env;
    sa.ra = GETPC();
    sa.mmu_index = cpu_mmu_index_kernel(env);

    if (e2 & DESC_S_MASK) {
        if (!(e2 & DESC_CS_MASK)) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            if (dpl != cpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            /* 64 bit case */
            sa.sp = env->regs[R_ESP];
            sa.sp_mask = -1;
            sa.ss_base = 0;
            pushq(&sa, env->segs[R_CS].selector);
            pushq(&sa, next_eip);
            /* from this point, not restartable */
            env->regs[R_ESP] = sa.sp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            env->eip = new_eip;
        } else
#endif
        {
            sa.sp = env->regs[R_ESP];
            sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
            sa.ss_base = env->segs[R_SS].base;
            if (shift) {
                pushl(&sa, env->segs[R_CS].selector);
                pushl(&sa, next_eip);
            } else {
                pushw(&sa, env->segs[R_CS].selector);
                pushw(&sa, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            /* from this point, not restartable */
            SET_ESP(sa.sp, sa.sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            env->eip = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;

#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (type != 12) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
        }
#endif

        switch (type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            }
            switch_tss_ra(env, new_cs, e1, e2, SWITCH_TSS_CALL, next_eip, GETPC());
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
            break;
        }
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, GETPC());
        }
        /* check valid bit */
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, GETPC());
        }
        selector = e1 >> 16;
        param_count = e2 & 0x1f;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            /* load the upper 8 bytes of the 64-bit call gate */
            if (load_segment_ra(env, &e1, &e2, new_cs + 8, GETPC())) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
            if (type != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc,
                                       GETPC());
            }
            offset |= ((target_ulong)e1) << 32;
        }
#endif
        if ((selector & 0xfffc) == 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }

        if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK))) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
        }
#ifdef TARGET_X86_64
        if (env->efer & MSR_EFER_LMA) {
            if (!(e2 & DESC_L_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            if (e2 & DESC_B_MASK) {
                raise_exception_err_ra(env, EXCP0D_GPF, selector & 0xfffc, GETPC());
            }
            shift++;
        }
#endif
        if (!(e2 & DESC_P_MASK)) {
            raise_exception_err_ra(env, EXCP0B_NOSEG, selector & 0xfffc, GETPC());
        }

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
#ifdef TARGET_X86_64
            if (shift == 2) {
                ss = dpl;  /* SS = NULL selector with RPL = new CPL */
                new_stack = 1;
                sa.sp = get_rsp_from_tss(env, dpl);
                sa.sp_mask = -1;
                sa.ss_base = 0;  /* SS base is always zero in IA-32e mode */
                LOG_PCALL("new ss:rsp=%04x:%016llx env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sa.sp, env->regs[R_ESP]);
            } else
#endif
            {
                uint32_t sp32;
                get_ss_esp_from_tss(env, &ss, &sp32, dpl, GETPC());
                LOG_PCALL("new ss:esp=%04x:%08x param_count=%d env->regs[R_ESP]="
                          TARGET_FMT_lx "\n", ss, sp32, param_count,
                          env->regs[R_ESP]);
                if ((ss & 0xfffc) == 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if ((ss & 3) != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (load_segment_ra(env, &ss_e1, &ss_e2, ss, GETPC()) != 0) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
                if (ss_dpl != dpl) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_S_MASK) ||
                    (ss_e2 & DESC_CS_MASK) ||
                    !(ss_e2 & DESC_W_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }
                if (!(ss_e2 & DESC_P_MASK)) {
                    raise_exception_err_ra(env, EXCP0A_TSS, ss & 0xfffc, GETPC());
                }

                sa.sp = sp32;
                sa.sp_mask = get_sp_mask(ss_e2);
                sa.ss_base = get_seg_base(ss_e1, ss_e2);
            }

            /* push_size = ((param_count * 2) + 8) << shift; */
            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;
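
            /*
             * Push the caller's SS:ESP onto the new stack, then copy the
             * param_count parameter words across from the old stack;
             * 64-bit call gates carry no parameters.
             */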
#ifdef TARGET_X86_64
            if (shift == 2) {
                /* XXX: verify if new stack address is canonical */
                pushq(&sa, env->segs[R_SS].selector);
                pushq(&sa, env->regs[R_ESP]);
                /* parameters aren't supported for 64-bit call gates */
            } else
#endif
            if (shift == 1) {
                pushl(&sa, env->segs[R_SS].selector);
                pushl(&sa, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_ldl_data_ra(env,
                                          old_ssp + ((env->regs[R_ESP] + i * 4) & old_sp_mask),
                                          GETPC());
                    pushl(&sa, val);
                }
            } else {
                pushw(&sa, env->segs[R_SS].selector);
                pushw(&sa, env->regs[R_ESP]);
                for (i = param_count - 1; i >= 0; i--) {
                    val = cpu_lduw_data_ra(env,
                                           old_ssp + ((env->regs[R_ESP] + i * 2) & old_sp_mask),
                                           GETPC());
                    pushw(&sa, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sa.sp = env->regs[R_ESP];
            sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
            sa.ss_base = env->segs[R_SS].base;
            /* push_size = (4 << shift); */
            new_stack = 0;
        }

#ifdef TARGET_X86_64
        if (shift == 2) {
            pushq(&sa, env->segs[R_CS].selector);
            pushq(&sa, next_eip);
        } else
#endif
        if (shift == 1) {
            pushl(&sa, env->segs[R_CS].selector);
            pushl(&sa, next_eip);
        } else {
            pushw(&sa, env->segs[R_CS].selector);
            pushw(&sa, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
#ifdef TARGET_X86_64
            if (shift == 2) {
                cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
            } else
#endif
            {
                ss = (ss & ~3) | dpl;
                cpu_x86_load_seg_cache(env, R_SS, ss,
                                       sa.ss_base,
                                       get_seg_limit(ss_e1, ss_e2),
                                       ss_e2);
            }
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        SET_ESP(sa.sp, sa.sp_mask);
        env->eip = offset;
    }
}

/* real and vm86 mode iret */
void helper_iret_real(CPUX86State *env, int shift)
{
    uint32_t new_cs, new_eip, new_eflags;
    int eflags_mask;
    StackAccess sa;

    sa.env = env;
    sa.ra = GETPC();
    sa.mmu_index = x86_mmu_index_pl(env, 0);
    sa.sp_mask = 0xffff; /* XXXX: use SS segment size? */
    sa.sp = env->regs[R_ESP];
    sa.ss_base = env->segs[R_SS].base;

    if (shift == 1) {
        /* 32 bits */
        new_eip = popl(&sa);
        new_cs = popl(&sa) & 0xffff;
        new_eflags = popl(&sa);
    } else {
        /* 16 bits */
        new_eip = popw(&sa);
        new_cs = popw(&sa);
        new_eflags = popw(&sa);
    }
    SET_ESP(sa.sp, sa.sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK) {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK |
            NT_MASK;
    } else {
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK |
            RF_MASK | NT_MASK;
    }
    if (shift == 0) {
        eflags_mask &= 0xffff;
    }
    cpu_load_eflags(env, new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}

static inline void validate_seg(CPUX86State *env, X86Seg seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0) {
        return;
    }

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0,
                                   env->segs[seg_reg].base,
                                   env->segs[seg_reg].limit,
                                   env->segs[seg_reg].flags & ~DESC_P_MASK);
        }
    }
}
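
/*
 * Common core of lret and iret: pop CS:EIP (plus EFLAGS for iret) at
 * the current operand size, validate the new CS, and when the return
 * lowers the privilege level additionally pop SS:ESP and re-validate
 * the data segment registers.
 */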
/* protected mode lret and iret (is_iret distinguishes the two) */
static inline void helper_ret_protected(CPUX86State *env, int shift,
                                        int is_iret, int addend,
                                        uintptr_t retaddr)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong new_eip, new_esp;
    StackAccess sa;

    cpl = env->hflags & HF_CPL_MASK;

    sa.env = env;
    sa.ra = retaddr;
    sa.mmu_index = x86_mmu_index_pl(env, cpl);

#ifdef TARGET_X86_64
    if (shift == 2) {
        sa.sp_mask = -1;
    } else
#endif
    {
        sa.sp_mask = get_sp_mask(env->segs[R_SS].flags);
    }
    sa.sp = env->regs[R_ESP];
    sa.ss_base = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
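    /*
     * Stack frame popped below, lowest address first:
     *   [E/R]IP
     *   CS
     *   [E/R]FLAGS          (IRET only)
     * SS:[E/R]SP follow on a privilege-changing return and are popped
     * further down.
     */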
#ifdef TARGET_X86_64
    if (shift == 2) {
        new_eip = popq(&sa);
        new_cs = popq(&sa) & 0xffff;
        if (is_iret) {
            new_eflags = popq(&sa);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        new_eip = popl(&sa);
        new_cs = popl(&sa) & 0xffff;
        if (is_iret) {
            new_eflags = popl(&sa);
            if (new_eflags & VM_MASK) {
                goto return_to_vm86;
            }
        }
    } else {
        /* 16 bits */
        new_eip = popw(&sa);
        new_cs = popw(&sa);
        if (is_iret) {
            new_eflags = popw(&sa);
        }
    }

    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env_cpu(env));
    if ((new_cs & 0xfffc) == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (load_segment_ra(env, &e1, &e2, new_cs, retaddr) != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK)) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    rpl = new_cs & 3;
    if (rpl < cpl) {
        raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    } else {
        if (dpl != rpl) {
            raise_exception_err_ra(env, EXCP0D_GPF, new_cs & 0xfffc, retaddr);
        }
    }
    if (!(e2 & DESC_P_MASK)) {
        raise_exception_err_ra(env, EXCP0B_NOSEG, new_cs & 0xfffc, retaddr);
    }

    sa.sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
    } else {
        /* return to different privilege level */
#ifdef TARGET_X86_64
        if (shift == 2) {
            new_esp = popq(&sa);
            new_ss = popq(&sa) & 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            new_esp = popl(&sa);
            new_ss = popl(&sa) & 0xffff;
        } else {
            /* 16 bits */
            new_esp = popw(&sa);
            new_ss = popw(&sa);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                  new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3 */
            /* XXX: test CS64? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed? */
            } else
#endif
            {
                raise_exception_err_ra(env, EXCP0D_GPF, 0, retaddr);
            }
        } else {
            if ((new_ss & 3) != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (load_segment_ra(env, &ss_e1, &ss_e2, new_ss, retaddr) != 0) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK)) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl) {
                raise_exception_err_ra(env, EXCP0D_GPF, new_ss & 0xfffc, retaddr);
            }
            if (!(ss_e2 & DESC_P_MASK)) {
                raise_exception_err_ra(env, EXCP0B_NOSEG, new_ss & 0xfffc, retaddr);
            }
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
        sa.sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK) {
            sa.sp_mask = -1;
        } else
#endif
        {
            sa.sp_mask = get_sp_mask(ss_e2);
        }

        /* validate data segments */
        validate_seg(env, R_ES, rpl);
        validate_seg(env, R_DS, rpl);
        validate_seg(env, R_FS, rpl);
        validate_seg(env, R_GS, rpl);

        sa.sp += addend;
    }
    SET_ESP(sa.sp, sa.sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0) {
            eflags_mask |= IOPL_MASK;
        }
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl) {
            eflags_mask |= IF_MASK;
        }
        if (shift == 0) {
            eflags_mask &= 0xffff;
        }
        cpu_load_eflags(env, new_eflags, eflags_mask);
    }
    return;
 return_to_vm86:
    new_esp = popl(&sa);
    new_ss = popl(&sa);
    new_es = popl(&sa);
    new_ds = popl(&sa);
    new_fs = popl(&sa);
    new_gs = popl(&sa);

    /* modify processor state */
    cpu_load_eflags(env, new_eflags, TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK |
                    VIP_MASK);
    load_seg_vm(env, R_CS, new_cs & 0xffff);
    load_seg_vm(env, R_SS, new_ss & 0xffff);
    load_seg_vm(env, R_ES, new_es & 0xffff);
    load_seg_vm(env, R_DS, new_ds & 0xffff);
    load_seg_vm(env, R_FS, new_fs & 0xffff);
    load_seg_vm(env, R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    env->regs[R_ESP] = new_esp;
}

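/*
 * Protected mode IRET.  When NT is set, this unwinds a nested task by
 * switching back to the TSS named in the back link field of the current
 * TSS; hardware task returns do not exist in long mode, hence the #GP
 * there.  Otherwise it is a plain protected-mode return.
 */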
void helper_iret_protected(CPUX86State *env, int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
        }
#endif
        tss_selector = cpu_lduw_kernel_ra(env, env->tr.base + 0, GETPC());
        if (tss_selector & 4) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        if (load_segment_ra(env, &e1, &e2, tss_selector, GETPC()) != 0) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3) {
            raise_exception_err_ra(env, EXCP0A_TSS, tss_selector & 0xfffc, GETPC());
        }
        switch_tss_ra(env, tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip, GETPC());
    } else {
        helper_ret_protected(env, shift, 1, 0, GETPC());
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}

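/*
 * Far RET: the same return path as IRET minus the EFLAGS pop; 'addend'
 * carries the immediate of RET imm16, used to discard callee arguments
 * from the stack after the frame has been popped.
 */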
void helper_lret_protected(CPUX86State *env, int shift, int addend)
{
    helper_ret_protected(env, shift, 0, addend, GETPC());
}

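/*
 * SYSENTER: enter CPL0 with flat segments derived from
 * IA32_SYSENTER_CS (CS = MSR value, SS = MSR value + 8) and continue at
 * IA32_SYSENTER_EIP with the stack at IA32_SYSENTER_ESP.  No descriptor
 * tables are consulted, which is what makes the instruction fast.
 */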
void helper_sysenter(CPUX86State *env)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    env->regs[R_ESP] = env->sysenter_esp;
    env->eip = env->sysenter_eip;
}

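/*
 * SYSEXIT: drop to CPL3 with flat segments derived from
 * IA32_SYSENTER_CS (+16/+24 for a legacy exit, +32/+40 for a 64-bit
 * one); the return ESP/EIP are taken from ECX/EDX.  Only legal from
 * CPL0 and only when IA32_SYSENTER_CS is non-zero.
 */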
void helper_sysexit(CPUX86State *env, int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err_ra(env, EXCP0D_GPF, 0, GETPC());
    }
#ifdef TARGET_X86_64
    if (dflag == 2) {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                               DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) |
                               3, 0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    env->regs[R_ESP] = env->regs[R_ECX];
    env->eip = env->regs[R_EDX];
}

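/*
 * LSL: return the byte-granular limit of the descriptor named by the
 * selector if it is visible at the current CPL/RPL, and report success
 * in ZF (kept in CC_SRC while CC_OP is CC_OP_EFLAGS).  For example, a
 * guest probing a selector with something along the lines of
 *     lsl %ax, %ebx       ; ZF=1 and %ebx=limit on success
 *     jnz not_usable
 * ends up here.
 */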
target_ulong helper_lsl(CPUX86State *env, target_ulong selector1)
{
    unsigned int limit;
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 9:
        case 11:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    limit = get_seg_limit(e1, e2);
    CC_SRC |= CC_Z;
    return limit;
}

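/*
 * LAR: same visibility rules as LSL (plus a few extra gate types), but
 * returns the access-rights bytes of the descriptor masked to
 * 0x00f0ff00 rather than its limit, again signalling success in ZF.
 */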
target_ulong helper_lar(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, selector;
    int rpl, dpl, cpl, type;

    selector = selector1 & 0xffff;
    assert(CC_OP == CC_OP_EFLAGS);
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
            /* conforming */
        } else {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch (type) {
        case 1:
        case 2:
        case 3:
        case 4:
        case 5:
        case 9:
        case 11:
        case 12:
            break;
        default:
            goto fail;
        }
        if (dpl < cpl || dpl < rpl) {
        fail:
            CC_SRC &= ~CC_Z;
            return 0;
        }
    }
    CC_SRC |= CC_Z;
    return e2 & 0x00f0ff00;
}

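/*
 * VERR: set ZF if the segment named by the selector is readable at the
 * current CPL/RPL.  Conforming code segments skip the privilege check;
 * code segments without the R bit always fail.
 */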
void helper_verr(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        if (!(e2 & DESC_R_MASK)) {
            goto fail;
        }
        if (!(e2 & DESC_C_MASK)) {
            if (dpl < cpl || dpl < rpl) {
                goto fail;
            }
        }
    } else {
        if (dpl < cpl || dpl < rpl) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}

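/*
 * VERW: set ZF if the segment named by the selector is a data segment
 * writable at the current CPL/RPL; code segments always fail.
 */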
void helper_verw(CPUX86State *env, target_ulong selector1)
{
    uint32_t e1, e2, eflags, selector;
    int rpl, dpl, cpl;

    selector = selector1 & 0xffff;
    eflags = cpu_cc_compute_all(env) | CC_Z;
    if ((selector & 0xfffc) == 0) {
        goto fail;
    }
    if (load_segment_ra(env, &e1, &e2, selector, GETPC()) != 0) {
        goto fail;
    }
    if (!(e2 & DESC_S_MASK)) {
        goto fail;
    }
    rpl = selector & 3;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_CS_MASK) {
        goto fail;
    } else {
        if (dpl < cpl || dpl < rpl) {
            goto fail;
        }
        if (!(e2 & DESC_W_MASK)) {
        fail:
            eflags &= ~CC_Z;
        }
    }
    CC_SRC = eflags;
    CC_OP = CC_OP_EFLAGS;
}