/* target-i386/op_helper.c — x86 CPU helper routines (QEMU) */
1 /*
2 * i386 helpers
4 * Copyright (c) 2003 Fabrice Bellard
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
20 #include <math.h>
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "ioport.h"
25 #include "qemu-log.h"
26 #include "cpu-defs.h"
27 #include "helper.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #include "softmmu_exec.h"
31 #endif /* !defined(CONFIG_USER_ONLY) */
//#define DEBUG_PCALL

/* When DEBUG_PCALL is defined, LOG_PCALL traces protected-mode control
   transfers and LOG_PCALL_STATE dumps CPU state; otherwise both expand
   to nothing. */
#ifdef DEBUG_PCALL
# define LOG_PCALL(...) qemu_log_mask(CPU_LOG_PCALL, ## __VA_ARGS__)
# define LOG_PCALL_STATE(env) \
    log_cpu_state_mask(CPU_LOG_PCALL, (env), X86_DUMP_CCOP)
#else
# define LOG_PCALL(...) do { } while (0)
# define LOG_PCALL_STATE(env) do { } while (0)
#endif
44 /* n must be a constant to be efficient */
45 static inline target_long lshift(target_long x, int n)
47 if (n >= 0) {
48 return x << n;
49 } else {
50 return x >> (-n);
/* rounding-control field of the x87 FPU control word */
#define FPU_RC_MASK 0xc00
#define FPU_RC_NEAR 0x000
#define FPU_RC_DOWN 0x400
#define FPU_RC_UP 0x800
#define FPU_RC_CHOP 0xc00

/* operand magnitude limit used by the tangent helpers (2^63) */
#define MAXTAN 9223372036854775808.0

/* the following deal with x86 long double-precision numbers */
#define MAXEXPD 0x7fff
#define EXPBIAS 16383
#define EXPD(fp) (fp.l.upper & 0x7fff)
#define SIGND(fp) ((fp.l.upper) & 0x8000)
#define MANTD(fp) (fp.l.lower)
#define BIASEXPONENT(fp) fp.l.upper = (fp.l.upper & ~(0x7fff)) | EXPBIAS
/* Push one slot onto the x87 register stack: decrement the stack-top
   index (modulo 8) and mark the new top entry valid. */
static inline void fpush(void)
{
    env->fpstt = (env->fpstt - 1) & 7;
    env->fptags[env->fpstt] = 0; /* validate stack entry */
}
/* Pop the x87 register stack: mark the current top entry empty and
   increment the stack-top index (modulo 8). */
static inline void fpop(void)
{
    env->fptags[env->fpstt] = 1; /* invalidate stack entry */
    env->fpstt = (env->fpstt + 1) & 7;
}
/* Load an 80-bit x87 long double from guest memory at ptr:
   8-byte mantissa followed by a 2-byte sign/exponent word. */
static inline floatx80 helper_fldt(target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.l.lower = ldq(ptr);
    temp.l.upper = lduw(ptr + 8);
    return temp.d;
}
/* Store an 80-bit x87 long double to guest memory at ptr, in the same
   layout helper_fldt() reads (mantissa, then sign/exponent word). */
static inline void helper_fstt(floatx80 f, target_ulong ptr)
{
    CPU_LDoubleU temp;

    temp.d = f;
    stq(ptr, temp.l.lower);
    stw(ptr + 8, temp.l.upper);
}
/* x87 FPU status word bits: exception flags, stack fault,
   error summary and busy. */
#define FPUS_IE (1 << 0)
#define FPUS_DE (1 << 1)
#define FPUS_ZE (1 << 2)
#define FPUS_OE (1 << 3)
#define FPUS_UE (1 << 4)
#define FPUS_PE (1 << 5)
#define FPUS_SF (1 << 6)
#define FPUS_SE (1 << 7)
#define FPUS_B (1 << 15)

/* exception-mask bits of the FPU control word */
#define FPUC_EM 0x3f
/* Rebuild the full EFLAGS value from the lazily-computed condition
   codes (CC_OP) and the cached direction flag. */
static inline uint32_t compute_eflags(void)
{
    return env->eflags | helper_cc_compute_all(CC_OP) | (DF & DF_MASK);
}
/* NOTE: CC_OP must be modified manually to CC_OP_EFLAGS */
static inline void load_eflags(int eflags, int update_mask)
{
    /* stash the arithmetic flags in CC_SRC for the lazy flag evaluator */
    CC_SRC = eflags & (CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C);
    /* DF is cached as +1/-1 for string-instruction stepping */
    DF = 1 - (2 * ((eflags >> 10) & 1));
    /* bit 1 of EFLAGS is architecturally always set (the 0x2) */
    env->eflags = (env->eflags & ~update_mask) |
        (eflags & update_mask) | 0x2;
}
/* load efer and update the corresponding hflags. XXX: do consistency
   checks with cpuid bits ? */
static inline void cpu_load_efer(CPUX86State *env, uint64_t val)
{
    env->efer = val;
    /* mirror the LMA and SVME bits of EFER into the hflags cache */
    env->hflags &= ~(HF_LMA_MASK | HF_SVME_MASK);
    if (env->efer & MSR_EFER_LMA) {
        env->hflags |= HF_LMA_MASK;
    }
    if (env->efer & MSR_EFER_SVME) {
        env->hflags |= HF_SVME_MASK;
    }
}
/* debug wrapper: enable to log every raised exception with its line */
#if 0
#define raise_exception_err(a, b)\
do {\
    qemu_log("raise_exception line=%d\n", __LINE__);\
    (raise_exception_err)(a, b);\
} while (0)
#endif

/* forward declaration: raises a CPU exception and does not return */
static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code);
/* PF lookup table: entry i is CC_P when byte value i has an even
   number of set bits, 0 otherwise. */
static const uint8_t parity_table[256] = {
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    CC_P, 0, 0, CC_P, 0, CC_P, CC_P, 0,
    0, CC_P, CC_P, 0, CC_P, 0, 0, CC_P,
};
/* modulo 17 table */
/* shift counts for 16-bit RCL/RCR: count mod 17 (16 data bits + CF) */
static const uint8_t rclw_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 9,10,11,12,13,14,15,
   16, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 9,10,11,12,13,14,
};
/* modulo 9 table */
/* shift counts for 8-bit RCL/RCR: count mod 9 (8 data bits + CF) */
static const uint8_t rclb_table[32] = {
    0, 1, 2, 3, 4, 5, 6, 7,
    8, 0, 1, 2, 3, 4, 5, 6,
    7, 8, 0, 1, 2, 3, 4, 5,
    6, 7, 8, 0, 1, 2, 3, 4,
};
/* 80-bit constants used by the x87 constant-load instructions:
   log10(2), log2(e) and log2(10). */
#define floatx80_lg2 make_floatx80( 0x3ffd, 0x9a209a84fbcff799LL )
#define floatx80_l2e make_floatx80( 0x3fff, 0xb8aa3b295c17f0bcLL )
#define floatx80_l2t make_floatx80( 0x4000, 0xd49a784bcd1b8afeLL )
/* broken thread support */

/* One global spinlock implements the x86 LOCK prefix. */
static spinlock_t global_cpu_lock = SPIN_LOCK_UNLOCKED;

void helper_lock(void)
{
    spin_lock(&global_cpu_lock);
}

void helper_unlock(void)
{
    spin_unlock(&global_cpu_lock);
}
/* Write EFLAGS from guest value t0, touching only the bits selected
   by update_mask. */
void helper_write_eflags(target_ulong t0, uint32_t update_mask)
{
    load_eflags(t0, update_mask);
}
/* Read the current EFLAGS value; the VM and RF bits are never
   reported to the guest. */
target_ulong helper_read_eflags(void)
{
    uint32_t eflags;

    eflags = helper_cc_compute_all(CC_OP);
    eflags |= (DF & DF_MASK);
    eflags |= env->eflags & ~(VM_MASK | RF_MASK);
    return eflags;
}
/* return non zero if error */
/* Fetch the two 32-bit words of the descriptor named by 'selector'
   from the GDT or LDT into *e1_ptr/*e2_ptr. */
static inline int load_segment(uint32_t *e1_ptr, uint32_t *e2_ptr,
                               int selector)
{
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    /* bit 2 of the selector chooses LDT vs GDT */
    if (selector & 0x4)
        dt = &env->ldt;
    else
        dt = &env->gdt;
    index = selector & ~7;
    /* the whole 8-byte descriptor must lie within the table limit */
    if ((index + 7) > dt->limit)
        return -1;
    ptr = dt->base + index;
    *e1_ptr = ldl_kernel(ptr);
    *e2_ptr = ldl_kernel(ptr + 4);
    return 0;
}
255 static inline unsigned int get_seg_limit(uint32_t e1, uint32_t e2)
257 unsigned int limit;
258 limit = (e1 & 0xffff) | (e2 & 0x000f0000);
259 if (e2 & DESC_G_MASK)
260 limit = (limit << 12) | 0xfff;
261 return limit;
/* Reassemble the 32-bit segment base that the descriptor scatters
   across both words: bits 0-15 in e1, bits 16-23 and 24-31 in e2. */
static inline uint32_t get_seg_base(uint32_t e1, uint32_t e2)
{
    uint32_t base = e1 >> 16;

    base |= (e2 & 0xff) << 16;
    base |= e2 & 0xff000000;
    return base;
}
/* Fill a segment cache entry directly from descriptor words e1/e2,
   with no permission or presence checks. */
static inline void load_seg_cache_raw_dt(SegmentCache *sc, uint32_t e1, uint32_t e2)
{
    sc->base = get_seg_base(e1, e2);
    sc->limit = get_seg_limit(e1, e2);
    sc->flags = e2;
}
/* init the segment cache in vm86 mode. */
/* In vm86 mode the base is selector * 16 and the limit is 64K. */
static inline void load_seg_vm(int seg, int selector)
{
    selector &= 0xffff;
    cpu_x86_load_seg_cache(env, seg, selector,
                           (selector << 4), 0xffff, 0);
}
/* Read from the current TSS the stack segment and stack pointer to
   use for a transfer to privilege level 'dpl'. */
static inline void get_ss_esp_from_tss(uint32_t *ss_ptr,
                                       uint32_t *esp_ptr, int dpl)
{
    int type, index, shift;

#if 0
    {
        int i;
        printf("TR: base=%p limit=%x\n", env->tr.base, env->tr.limit);
        for(i=0;i<env->tr.limit;i++) {
            printf("%02x ", env->tr.base[i]);
            if ((i & 7) == 7) printf("\n");
        }
        printf("\n");
    }
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if ((type & 7) != 1)
        cpu_abort(env, "invalid tss type");
    /* shift is 0 for a 16-bit TSS, 1 for a 32-bit TSS */
    shift = type >> 3;
    index = (dpl * 4 + 2) << shift;
    if (index + (4 << shift) - 1 > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    if (shift == 0) {
        *esp_ptr = lduw_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 2);
    } else {
        *esp_ptr = ldl_kernel(env->tr.base + index);
        *ss_ptr = lduw_kernel(env->tr.base + index + 4);
    }
}
/* XXX: merge with load_seg() */
/* Load a segment register during a task switch; any check failure
   raises a TSS fault so the switch can be restarted. */
static void tss_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int rpl, dpl, cpl;

    if ((selector & 0xfffc) != 0) {
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        cpl = env->hflags & HF_CPL_MASK;
        if (seg_reg == R_CS) {
            if (!(e2 & DESC_CS_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* XXX: is it correct ? */
            if (dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if ((e2 & DESC_C_MASK) && dpl > rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else if (seg_reg == R_SS) {
            /* SS must be writable data */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            if (dpl != cpl || dpl != rpl)
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
        } else {
            /* not readable code */
            if ((e2 & DESC_CS_MASK) && !(e2 & DESC_R_MASK))
                raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            /* if data or non conforming code, checks the rights */
            if (((e2 >> DESC_TYPE_SHIFT) & 0xf) < 12) {
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
            }
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        cpu_x86_load_seg_cache(env, seg_reg, selector,
                        get_seg_base(e1, e2),
                        get_seg_limit(e1, e2),
                        e2);
    } else {
        /* a null selector is only acceptable for data segments */
        if (seg_reg == R_SS || seg_reg == R_CS)
            raise_exception_err(EXCP0A_TSS, selector & 0xfffc);
    }
}
#define SWITCH_TSS_JMP 0
#define SWITCH_TSS_IRET 1
#define SWITCH_TSS_CALL 2

/* XXX: restore CPU state in registers (PowerPC case) */
/* Perform a hardware task switch to the TSS named by tss_selector
   (descriptor words e1/e2). 'source' says whether we arrived via
   jmp, call or iret; that controls busy-bit, back-link and NT-flag
   handling below. */
static void switch_tss(int tss_selector,
                       uint32_t e1, uint32_t e2, int source,
                       uint32_t next_eip)
{
    int tss_limit, tss_limit_max, type, old_tss_limit_max, old_type, v1, v2, i;
    target_ulong tss_base;
    uint32_t new_regs[8], new_segs[6];
    uint32_t new_eflags, new_eip, new_cr3, new_ldt, new_trap;
    uint32_t old_eflags, eflags_mask;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
    LOG_PCALL("switch_tss: sel=0x%04x type=%d src=%d\n", tss_selector, type, source);

    /* if task gate, we read the TSS segment and we load it */
    if (type == 5) {
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);
        /* the gate's e1 holds the real TSS selector */
        tss_selector = e1 >> 16;
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        if (e2 & DESC_S_MASK)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        if ((type & 7) != 1)
            raise_exception_err(EXCP0D_GPF, tss_selector & 0xfffc);
    }

    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, tss_selector & 0xfffc);

    /* bit 3 of the type distinguishes a 32-bit TSS (104 bytes) from a
       16-bit TSS (44 bytes) */
    if (type & 8)
        tss_limit_max = 103;
    else
        tss_limit_max = 43;
    tss_limit = get_seg_limit(e1, e2);
    tss_base = get_seg_base(e1, e2);
    if ((tss_selector & 4) != 0 ||
        tss_limit < tss_limit_max)
        raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
    old_type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
    if (old_type & 8)
        old_tss_limit_max = 103;
    else
        old_tss_limit_max = 43;

    /* read all the registers from the new TSS */
    if (type & 8) {
        /* 32 bit */
        new_cr3 = ldl_kernel(tss_base + 0x1c);
        new_eip = ldl_kernel(tss_base + 0x20);
        new_eflags = ldl_kernel(tss_base + 0x24);
        for(i = 0; i < 8; i++)
            new_regs[i] = ldl_kernel(tss_base + (0x28 + i * 4));
        for(i = 0; i < 6; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x48 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x60);
        new_trap = ldl_kernel(tss_base + 0x64);
    } else {
        /* 16 bit */
        new_cr3 = 0;
        new_eip = lduw_kernel(tss_base + 0x0e);
        new_eflags = lduw_kernel(tss_base + 0x10);
        for(i = 0; i < 8; i++)
            new_regs[i] = lduw_kernel(tss_base + (0x12 + i * 2)) | 0xffff0000;
        for(i = 0; i < 4; i++)
            new_segs[i] = lduw_kernel(tss_base + (0x22 + i * 4));
        new_ldt = lduw_kernel(tss_base + 0x2a);
        new_segs[R_FS] = 0;
        new_segs[R_GS] = 0;
        new_trap = 0;
    }
    /* XXX: avoid a compiler warning, see
       http://support.amd.com/us/Processor_TechDocs/24593.pdf
       chapters 12.2.5 and 13.2.4 on how to implement TSS Trap bit */
    (void)new_trap;

    /* NOTE: we must avoid memory exceptions during the task switch,
       so we make dummy accesses before */
    /* XXX: it can still fail in some cases, so a bigger hack is
       necessary to valid the TLB after having done the accesses */

    v1 = ldub_kernel(env->tr.base);
    v2 = ldub_kernel(env->tr.base + old_tss_limit_max);
    stb_kernel(env->tr.base, v1);
    stb_kernel(env->tr.base + old_tss_limit_max, v2);

    /* clear busy bit (it is restartable) */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_IRET) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (env->tr.selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 &= ~DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    old_eflags = compute_eflags();
    if (source == SWITCH_TSS_IRET)
        old_eflags &= ~NT_MASK;

    /* save the current state in the old TSS */
    if (type & 8) {
        /* 32 bit */
        stl_kernel(env->tr.base + 0x20, next_eip);
        stl_kernel(env->tr.base + 0x24, old_eflags);
        stl_kernel(env->tr.base + (0x28 + 0 * 4), EAX);
        stl_kernel(env->tr.base + (0x28 + 1 * 4), ECX);
        stl_kernel(env->tr.base + (0x28 + 2 * 4), EDX);
        stl_kernel(env->tr.base + (0x28 + 3 * 4), EBX);
        stl_kernel(env->tr.base + (0x28 + 4 * 4), ESP);
        stl_kernel(env->tr.base + (0x28 + 5 * 4), EBP);
        stl_kernel(env->tr.base + (0x28 + 6 * 4), ESI);
        stl_kernel(env->tr.base + (0x28 + 7 * 4), EDI);
        for(i = 0; i < 6; i++)
            stw_kernel(env->tr.base + (0x48 + i * 4), env->segs[i].selector);
    } else {
        /* 16 bit */
        stw_kernel(env->tr.base + 0x0e, next_eip);
        stw_kernel(env->tr.base + 0x10, old_eflags);
        stw_kernel(env->tr.base + (0x12 + 0 * 2), EAX);
        stw_kernel(env->tr.base + (0x12 + 1 * 2), ECX);
        stw_kernel(env->tr.base + (0x12 + 2 * 2), EDX);
        stw_kernel(env->tr.base + (0x12 + 3 * 2), EBX);
        stw_kernel(env->tr.base + (0x12 + 4 * 2), ESP);
        stw_kernel(env->tr.base + (0x12 + 5 * 2), EBP);
        stw_kernel(env->tr.base + (0x12 + 6 * 2), ESI);
        stw_kernel(env->tr.base + (0x12 + 7 * 2), EDI);
        for(i = 0; i < 4; i++)
            stw_kernel(env->tr.base + (0x22 + i * 4), env->segs[i].selector);
    }

    /* now if an exception occurs, it will occurs in the next task
       context */

    if (source == SWITCH_TSS_CALL) {
        /* nested task: store the back link and set NT in the new task */
        stw_kernel(tss_base, env->tr.selector);
        new_eflags |= NT_MASK;
    }

    /* set busy bit */
    if (source == SWITCH_TSS_JMP || source == SWITCH_TSS_CALL) {
        target_ulong ptr;
        uint32_t e2;
        ptr = env->gdt.base + (tss_selector & ~7);
        e2 = ldl_kernel(ptr + 4);
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }

    /* set the new CPU state */
    /* from this point, any exception which occurs can give problems */
    env->cr[0] |= CR0_TS_MASK;
    env->hflags |= HF_TS_MASK;
    env->tr.selector = tss_selector;
    env->tr.base = tss_base;
    env->tr.limit = tss_limit;
    env->tr.flags = e2 & ~DESC_TSS_BUSY_MASK;

    if ((type & 8) && (env->cr[0] & CR0_PG_MASK)) {
        cpu_x86_update_cr3(env, new_cr3);
    }

    /* load all registers without an exception, then reload them with
       possible exception */
    env->eip = new_eip;
    eflags_mask = TF_MASK | AC_MASK | ID_MASK |
        IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK;
    if (!(type & 8))
        eflags_mask &= 0xffff;
    load_eflags(new_eflags, eflags_mask);
    /* XXX: what to do in 16 bit case ? */
    EAX = new_regs[0];
    ECX = new_regs[1];
    EDX = new_regs[2];
    EBX = new_regs[3];
    ESP = new_regs[4];
    EBP = new_regs[5];
    ESI = new_regs[6];
    EDI = new_regs[7];
    if (new_eflags & VM_MASK) {
        for(i = 0; i < 6; i++)
            load_seg_vm(i, new_segs[i]);
        /* in vm86, CPL is always 3 */
        cpu_x86_set_cpl(env, 3);
    } else {
        /* CPL is set the RPL of CS */
        cpu_x86_set_cpl(env, new_segs[R_CS] & 3);
        /* first just selectors as the rest may trigger exceptions */
        for(i = 0; i < 6; i++)
            cpu_x86_load_seg_cache(env, i, new_segs[i], 0, 0, 0);
    }

    env->ldt.selector = new_ldt & ~4;
    env->ldt.base = 0;
    env->ldt.limit = 0;
    env->ldt.flags = 0;

    /* load the LDT */
    if (new_ldt & 4)
        raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);

    if ((new_ldt & 0xfffc) != 0) {
        dt = &env->gdt;
        index = new_ldt & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, new_ldt & 0xfffc);
        load_seg_cache_raw_dt(&env->ldt, e1, e2);
    }

    /* load the segments */
    if (!(new_eflags & VM_MASK)) {
        tss_load_seg(R_CS, new_segs[R_CS]);
        tss_load_seg(R_SS, new_segs[R_SS]);
        tss_load_seg(R_ES, new_segs[R_ES]);
        tss_load_seg(R_DS, new_segs[R_DS]);
        tss_load_seg(R_FS, new_segs[R_FS]);
        tss_load_seg(R_GS, new_segs[R_GS]);
    }

    /* check that EIP is in the CS segment limits */
    if (new_eip > env->segs[R_CS].limit) {
        /* XXX: different exception if CALL ? */
        raise_exception_err(EXCP0D_GPF, 0);
    }

#ifndef CONFIG_USER_ONLY
    /* reset local breakpoints */
    if (env->dr[7] & 0x55) {
        for (i = 0; i < 4; i++) {
            if (hw_breakpoint_enabled(env->dr[7], i) == 0x1)
                hw_breakpoint_remove(env, i);
        }
        env->dr[7] &= ~0x55;
    }
#endif
}
/* check if Port I/O is allowed in TSS */
/* Consult the I/O permission bitmap of the current 32-bit TSS for an
   access of 'size' bytes at port 'addr'; raises #GP(0) if denied. */
static inline void check_io(int addr, int size)
{
    int io_offset, val, mask;

    /* TSS must be a valid 32 bit one */
    if (!(env->tr.flags & DESC_P_MASK) ||
        ((env->tr.flags >> DESC_TYPE_SHIFT) & 0xf) != 9 ||
        env->tr.limit < 103)
        goto fail;
    io_offset = lduw_kernel(env->tr.base + 0x66);
    io_offset += (addr >> 3);
    /* Note: the check needs two bytes */
    if ((io_offset + 1) > env->tr.limit)
        goto fail;
    val = lduw_kernel(env->tr.base + io_offset);
    val >>= (addr & 7);
    mask = (1 << size) - 1;
    /* all bits must be zero to allow the I/O */
    if ((val & mask) != 0) {
    fail:
        raise_exception_err(EXCP0D_GPF, 0);
    }
}
/* TSS I/O permission checks for byte, word and dword port accesses. */
void helper_check_iob(uint32_t t0)
{
    check_io(t0, 1);
}

void helper_check_iow(uint32_t t0)
{
    check_io(t0, 2);
}

void helper_check_iol(uint32_t t0)
{
    check_io(t0, 4);
}
/* IN/OUT instruction helpers: forward guest port I/O to the board's
   I/O-port dispatch, truncating the data to the access width. */
void helper_outb(uint32_t port, uint32_t data)
{
    cpu_outb(port, data & 0xff);
}

target_ulong helper_inb(uint32_t port)
{
    return cpu_inb(port);
}

void helper_outw(uint32_t port, uint32_t data)
{
    cpu_outw(port, data & 0xffff);
}

target_ulong helper_inw(uint32_t port)
{
    return cpu_inw(port);
}

void helper_outl(uint32_t port, uint32_t data)
{
    cpu_outl(port, data);
}

target_ulong helper_inl(uint32_t port)
{
    return cpu_inl(port);
}
692 static inline unsigned int get_sp_mask(unsigned int e2)
694 if (e2 & DESC_B_MASK)
695 return 0xffffffff;
696 else
697 return 0xffff;
/* Return 1 when the CPU pushes an error code for exception 'intno':
   vectors 8 (#DF), 10-14 (#TS, #NP, #SS, #GP, #PF) and 17 (#AC). */
static int exeption_has_error_code(int intno)
{
    return intno == 8 || (intno >= 10 && intno <= 14) || intno == 17;
}
#ifdef TARGET_X86_64
/* On x86-64 an ESP update must preserve the untouched high bits for a
   16-bit stack, truncate to 32 bits for a 32-bit stack, and use the
   full 64-bit value otherwise. */
#define SET_ESP(val, sp_mask)\
do {\
    if ((sp_mask) == 0xffff)\
        ESP = (ESP & ~0xffff) | ((val) & 0xffff);\
    else if ((sp_mask) == 0xffffffffLL)\
        ESP = (uint32_t)(val);\
    else\
        ESP = (val);\
} while (0)
#else
#define SET_ESP(val, sp_mask) ESP = (ESP & ~(sp_mask)) | ((val) & (sp_mask))
#endif

/* in 64-bit machines, this can overflow. So this segment addition macro
 * can be used to trim the value to 32-bit whenever needed */
#define SEG_ADDL(ssp, sp, sp_mask) ((uint32_t)((ssp) + (sp & (sp_mask))))
/* XXX: add a is_user flag to have proper security support */
/* Push/pop 16- and 32-bit values on the guest kernel stack. 'sp' is
   updated in place and is masked with sp_mask on each access. */
#define PUSHW(ssp, sp, sp_mask, val)\
{\
    sp -= 2;\
    stw_kernel((ssp) + (sp & (sp_mask)), (val));\
}

#define PUSHL(ssp, sp, sp_mask, val)\
{\
    sp -= 4;\
    stl_kernel(SEG_ADDL(ssp, sp, sp_mask), (uint32_t)(val));\
}

#define POPW(ssp, sp, sp_mask, val)\
{\
    val = lduw_kernel((ssp) + (sp & (sp_mask)));\
    sp += 2;\
}

#define POPL(ssp, sp, sp_mask, val)\
{\
    val = (uint32_t)ldl_kernel(SEG_ADDL(ssp, sp, sp_mask));\
    sp += 4;\
}
/* protected mode interrupt */
/* Deliver exception/interrupt 'intno' in protected mode through the
   IDT: handles task gates, 286 and 386 interrupt/trap gates, optional
   stack switch to an inner privilege level, and vm86 re-entry. */
static void do_interrupt_protected(int intno, int is_int, int error_code,
                                   unsigned int next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int type, dpl, selector, ss_dpl, cpl;
    int has_error_code, new_stack, shift;
    uint32_t e1, e2, offset, ss = 0, esp, ss_e1 = 0, ss_e2 = 0;
    uint32_t old_eip, sp_mask;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    /* each IDT entry is 8 bytes wide in protected mode */
    if (intno * 8 + 7 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 8;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 5: /* task gate */
        /* must do that check here to return the correct error code */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
        switch_tss(intno * 8, e1, e2, SWITCH_TSS_CALL, old_eip);
        if (has_error_code) {
            int type;
            uint32_t mask;
            /* push the error code */
            type = (env->tr.flags >> DESC_TYPE_SHIFT) & 0xf;
            shift = type >> 3;
            if (env->segs[R_SS].flags & DESC_B_MASK)
                mask = 0xffffffff;
            else
                mask = 0xffff;
            esp = (ESP - (2 << shift)) & mask;
            ssp = env->segs[R_SS].base + esp;
            if (shift)
                stl_kernel(ssp, error_code);
            else
                stw_kernel(ssp, error_code);
            SET_ESP(esp, mask);
        }
        return;
    case 6: /* 286 interrupt gate */
    case 7: /* 286 trap gate */
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 8 + 2);
    selector = e1 >> 16;
    offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    if (!(e2 & DESC_C_MASK) && dpl < cpl) {
        /* to inner privilege */
        get_ss_esp_from_tss(&ss, &esp, dpl);
        if ((ss & 0xfffc) == 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if ((ss & 3) != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (load_segment(&ss_e1, &ss_e2, ss) != 0)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
        if (ss_dpl != dpl)
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_S_MASK) ||
            (ss_e2 & DESC_CS_MASK) ||
            !(ss_e2 & DESC_W_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        if (!(ss_e2 & DESC_P_MASK))
            raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
        new_stack = 1;
        sp_mask = get_sp_mask(ss_e2);
        ssp = get_seg_base(ss_e1, ss_e2);
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
        ssp = env->segs[R_SS].base;
        esp = ESP;
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        sp_mask = 0; /* avoid warning */
        ssp = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    /* shift selects between 16-bit (0) and 32-bit (1) stack frames */
    shift = type >> 3;

#if 0
    /* XXX: check that enough room is available */
    push_size = 6 + (new_stack << 2) + (has_error_code << 1);
    if (env->eflags & VM_MASK)
        push_size += 8;
    push_size <<= shift;
#endif
    if (shift == 1) {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHL(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHL(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHL(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHL(ssp, esp, sp_mask, ESP);
        }
        PUSHL(ssp, esp, sp_mask, compute_eflags());
        PUSHL(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHL(ssp, esp, sp_mask, error_code);
        }
    } else {
        if (new_stack) {
            if (env->eflags & VM_MASK) {
                PUSHW(ssp, esp, sp_mask, env->segs[R_GS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_FS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_DS].selector);
                PUSHW(ssp, esp, sp_mask, env->segs[R_ES].selector);
            }
            PUSHW(ssp, esp, sp_mask, env->segs[R_SS].selector);
            PUSHW(ssp, esp, sp_mask, ESP);
        }
        PUSHW(ssp, esp, sp_mask, compute_eflags());
        PUSHW(ssp, esp, sp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, sp_mask, old_eip);
        if (has_error_code) {
            PUSHW(ssp, esp, sp_mask, error_code);
        }
    }

    if (new_stack) {
        if (env->eflags & VM_MASK) {
            /* entered from vm86: the data segments are cleared */
            cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0, 0);
            cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0, 0);
        }
        ss = (ss & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss,
                               ssp, get_seg_limit(ss_e1, ss_e2), ss_e2);
    }
    SET_ESP(esp, sp_mask);

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#ifdef TARGET_X86_64

/* Push/pop 64-bit values on the guest kernel stack; no mask is
   needed because 64-bit mode always uses the full RSP. */
#define PUSHQ(sp, val)\
{\
    sp -= 8;\
    stq_kernel(sp, (val));\
}

#define POPQ(sp, val)\
{\
    val = ldq_kernel(sp);\
    sp += 8;\
}
/* Read the stack pointer for privilege 'level' (or an IST slot) from
   the 64-bit TSS. */
static inline target_ulong get_rsp_from_tss(int level)
{
    int index;

#if 0
    printf("TR: base=" TARGET_FMT_lx " limit=%x\n",
           env->tr.base, env->tr.limit);
#endif

    if (!(env->tr.flags & DESC_P_MASK))
        cpu_abort(env, "invalid tss");
    /* 64-bit TSS: RSP0-2 at offset 4, IST1-7 follow */
    index = 8 * level + 4;
    if ((index + 7) > env->tr.limit)
        raise_exception_err(EXCP0A_TSS, env->tr.selector & 0xfffc);
    return ldq_kernel(env->tr.base + index);
}
/* 64 bit interrupt */
/* Deliver exception/interrupt 'intno' in long mode: only 64-bit
   interrupt/trap gates are legal, the IST may force a stack switch,
   and SS:RSP are always pushed. */
static void do_interrupt64(int intno, int is_int, int error_code,
                           target_ulong next_eip, int is_hw)
{
    SegmentCache *dt;
    target_ulong ptr;
    int type, dpl, selector, cpl, ist;
    int has_error_code, new_stack;
    uint32_t e1, e2, e3, ss;
    target_ulong old_eip, esp, offset;

    has_error_code = 0;
    if (!is_int && !is_hw)
        has_error_code = exeption_has_error_code(intno);
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;

    dt = &env->idt;
    /* long-mode IDT entries are 16 bytes wide */
    if (intno * 16 + 15 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    ptr = dt->base + intno * 16;
    e1 = ldl_kernel(ptr);
    e2 = ldl_kernel(ptr + 4);
    e3 = ldl_kernel(ptr + 8);
    /* check gate type */
    type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
    switch(type) {
    case 14: /* 386 interrupt gate */
    case 15: /* 386 trap gate */
        break;
    default:
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
        break;
    }
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, intno * 16 + 2);
    /* check valid bit */
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, intno * 16 + 2);
    selector = e1 >> 16;
    offset = ((target_ulong)e3 << 32) | (e2 & 0xffff0000) | (e1 & 0x0000ffff);
    ist = e2 & 7;
    if ((selector & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);

    if (load_segment(&e1, &e2, selector) != 0)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (dpl > cpl)
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
    /* the target must be a 64-bit (L=1, D=0) code segment */
    if (!(e2 & DESC_L_MASK) || (e2 & DESC_B_MASK))
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
    if ((!(e2 & DESC_C_MASK) && dpl < cpl) || ist != 0) {
        /* to inner privilege */
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = get_rsp_from_tss(dpl);
        esp &= ~0xfLL; /* align stack */
        ss = 0;
        new_stack = 1;
    } else if ((e2 & DESC_C_MASK) || dpl == cpl) {
        /* to same privilege */
        if (env->eflags & VM_MASK)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0;
        if (ist != 0)
            esp = get_rsp_from_tss(ist + 3);
        else
            esp = ESP;
        esp &= ~0xfLL; /* align stack */
        dpl = cpl;
    } else {
        raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        new_stack = 0; /* avoid warning */
        esp = 0; /* avoid warning */
    }

    PUSHQ(esp, env->segs[R_SS].selector);
    PUSHQ(esp, ESP);
    PUSHQ(esp, compute_eflags());
    PUSHQ(esp, env->segs[R_CS].selector);
    PUSHQ(esp, old_eip);
    if (has_error_code) {
        PUSHQ(esp, error_code);
    }

    if (new_stack) {
        /* SS is loaded with a null selector whose RPL is the new CPL */
        ss = 0 | dpl;
        cpu_x86_load_seg_cache(env, R_SS, ss, 0, 0, 0);
    }
    ESP = esp;

    selector = (selector & ~3) | dpl;
    cpu_x86_load_seg_cache(env, R_CS, selector,
                   get_seg_base(e1, e2),
                   get_seg_limit(e1, e2),
                   e2);
    cpu_x86_set_cpl(env, dpl);
    env->eip = offset;

    /* interrupt gate clear IF mask */
    if ((type & 1) == 0) {
        env->eflags &= ~IF_MASK;
    }
    env->eflags &= ~(TF_MASK | VM_MASK | RF_MASK | NT_MASK);
}
#endif
#ifdef TARGET_X86_64
#if defined(CONFIG_USER_ONLY)
/* SYSCALL in user-mode emulation: report it to the main loop, which
   forwards it to the host OS instead of emulating the MSR-based entry. */
void helper_syscall(int next_eip_addend)
{
    env->exception_index = EXCP_SYSCALL;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
#else
/* SYSCALL (system emulation): fast ring-0 entry using the STAR/LSTAR/
   CSTAR/FMASK MSRs; flat CS/SS descriptors are loaded directly. */
void helper_syscall(int next_eip_addend)
{
    int selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    /* STAR[47:32] holds the kernel CS selector base */
    selector = (env->star >> 32) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        int code64;

        /* long mode: return RIP goes to RCX, saved RFLAGS to R11 */
        ECX = env->eip + next_eip_addend;
        env->regs[11] = compute_eflags();

        code64 = env->hflags & HF_CS64_MASK;

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        /* clear the RFLAGS bits selected by MSR_FMASK */
        env->eflags &= ~env->fmask;
        load_eflags(env->eflags, 0);
        if (code64)
            env->eip = env->lstar;
        else
            env->eip = env->cstar;
    } else {
        /* legacy mode */
        ECX = (uint32_t)(env->eip + next_eip_addend);

        cpu_x86_set_cpl(env, 0);
        cpu_x86_load_seg_cache(env, R_CS, selector & 0xfffc,
                           0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, (selector + 8) & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags &= ~(IF_MASK | RF_MASK | VM_MASK);
        env->eip = (uint32_t)env->star;
    }
}
#endif
#endif
1164 #ifdef TARGET_X86_64
/* SYSRET: fast return from a SYSCALL handler.  Loads user CS/SS from
   STAR[63:48] (CS+16 for a 64-bit return), restores RIP from RCX and,
   in long mode, RFLAGS from R11, then drops to CPL 3.
   Raises #UD if EFER.SCE is clear and #GP if not in protected mode or
   not at CPL 0. */
void helper_sysret(int dflag)
{
    int cpl, selector;

    if (!(env->efer & MSR_EFER_SCE)) {
        raise_exception_err(EXCP06_ILLOP, 0);
    }
    cpl = env->hflags & HF_CPL_MASK;
    if (!(env->cr[0] & CR0_PE_MASK) || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    selector = (env->star >> 48) & 0xffff;
    if (env->hflags & HF_LMA_MASK) {
        if (dflag == 2) {
            /* return to 64-bit mode: CS = STAR.SYSRET_CS + 16, RPL 3 */
            cpu_x86_load_seg_cache(env, R_CS, (selector + 16) | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK |
                                   DESC_L_MASK);
            env->eip = ECX;
        } else {
            /* return to 32-bit compatibility mode */
            cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                                   0, 0xffffffff,
                                   DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                   DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                                   DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
            env->eip = (uint32_t)ECX;
        }
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        /* RFLAGS restored from R11 */
        load_eflags((uint32_t)(env->regs[11]), TF_MASK | AC_MASK | ID_MASK |
                    IF_MASK | IOPL_MASK | VM_MASK | RF_MASK | NT_MASK);
        cpu_x86_set_cpl(env, 3);
    } else {
        /* legacy mode return: only IF is restored */
        cpu_x86_load_seg_cache(env, R_CS, selector | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        env->eip = (uint32_t)ECX;
        cpu_x86_load_seg_cache(env, R_SS, selector + 8,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
        env->eflags |= IF_MASK;
        cpu_x86_set_cpl(env, 3);
    }
}
1218 #endif
/* real mode interrupt */
/* Dispatch an interrupt/exception in real mode: fetch CS:IP from the
   4-byte IVT entry, push FLAGS/CS/IP (16-bit) and jump to the handler
   with IF/TF/AC/RF cleared. */
static void do_interrupt_real(int intno, int is_int, int error_code,
                              unsigned int next_eip)
{
    SegmentCache *dt;
    target_ulong ptr, ssp;
    int selector;
    uint32_t offset, esp;
    uint32_t old_cs, old_eip;

    /* real mode (simpler !) */
    dt = &env->idt;
    /* IVT entries are 4 bytes; the whole entry must be within the limit */
    if (intno * 4 + 3 > dt->limit)
        raise_exception_err(EXCP0D_GPF, intno * 8 + 2);
    ptr = dt->base + intno * 4;
    offset = lduw_kernel(ptr);
    selector = lduw_kernel(ptr + 2);
    esp = ESP;
    ssp = env->segs[R_SS].base;
    /* for software INT the saved IP is the next instruction */
    if (is_int)
        old_eip = next_eip;
    else
        old_eip = env->eip;
    old_cs = env->segs[R_CS].selector;
    /* XXX: use SS segment size ? */
    PUSHW(ssp, esp, 0xffff, compute_eflags());
    PUSHW(ssp, esp, 0xffff, old_cs);
    PUSHW(ssp, esp, 0xffff, old_eip);

    /* update processor state */
    ESP = (ESP & ~0xffff) | (esp & 0xffff);
    env->eip = offset;
    env->segs[R_CS].selector = selector;
    env->segs[R_CS].base = (selector << 4);
    env->eflags &= ~(IF_MASK | TF_MASK | AC_MASK | RF_MASK);
}
1257 #if defined(CONFIG_USER_ONLY)
/* fake user mode interrupt */
/* User-mode emulation: only perform the privilege check for software
   interrupts, then leave the exception pending for the main loop —
   there is no guest kernel to actually enter. */
static void do_interrupt_user(int intno, int is_int, int error_code,
                              target_ulong next_eip)
{
    SegmentCache *dt;
    target_ulong ptr;
    int dpl, cpl, shift;
    uint32_t e2;

    dt = &env->idt;
    /* IDT entries are 16 bytes in long mode, 8 bytes otherwise */
    if (env->hflags & HF_LMA_MASK) {
        shift = 4;
    } else {
        shift = 3;
    }
    ptr = dt->base + (intno << shift);
    e2 = ldl_kernel(ptr + 4);

    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    cpl = env->hflags & HF_CPL_MASK;
    /* check privilege if software int */
    if (is_int && dpl < cpl)
        raise_exception_err(EXCP0D_GPF, (intno << shift) + 2);

    /* Since we emulate only user space, we cannot do more than
       exiting the emulation with the suitable exception and error
       code */
    if (is_int)
        EIP = next_eip;
}
1289 #else
/* SVM: record the event being delivered in VMCB.EVENTINJ so a nested
   hypervisor can observe/reinject it.  'rm' is set when delivering in
   real mode, where no error code is pushed. */
static void handle_even_inj(int intno, int is_int, int error_code,
                            int is_hw, int rm)
{
    uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    /* do not overwrite an injection already requested by the hypervisor */
    if (!(event_inj & SVM_EVTINJ_VALID)) {
        int type;
        if (is_int)
            type = SVM_EVTINJ_TYPE_SOFT;
        else
            type = SVM_EVTINJ_TYPE_EXEPT;
        event_inj = intno | type | SVM_EVTINJ_VALID;
        if (!rm && exeption_has_error_code(intno)) {
            event_inj |= SVM_EVTINJ_VALID_ERR;
            stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err), error_code);
        }
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj);
    }
}
1309 #endif
/*
 * Begin execution of an interruption. is_int is TRUE if coming from
 * the int instruction. next_eip is the EIP value AFTER the interrupt
 * instruction. It is only relevant if is_int is TRUE.
 */
static void do_interrupt_all(int intno, int is_int, int error_code,
                             target_ulong next_eip, int is_hw)
{
    /* optional interrupt logging (CPU_LOG_INT) */
    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        if ((env->cr[0] & CR0_PE_MASK)) {
            static int count;
            qemu_log("%6d: v=%02x e=%04x i=%d cpl=%d IP=%04x:" TARGET_FMT_lx " pc=" TARGET_FMT_lx " SP=%04x:" TARGET_FMT_lx,
                     count, intno, error_code, is_int,
                     env->hflags & HF_CPL_MASK,
                     env->segs[R_CS].selector, EIP,
                     (int)env->segs[R_CS].base + EIP,
                     env->segs[R_SS].selector, ESP);
            if (intno == 0x0e) {
                qemu_log(" CR2=" TARGET_FMT_lx, env->cr[2]);
            } else {
                qemu_log(" EAX=" TARGET_FMT_lx, EAX);
            }
            qemu_log("\n");
            log_cpu_state(env, X86_DUMP_CCOP);
#if 0
            {
                int i;
                target_ulong ptr;
                qemu_log("       code=");
                ptr = env->segs[R_CS].base + env->eip;
                for(i = 0; i < 16; i++) {
                    qemu_log(" %02x", ldub(ptr + i));
                }
                qemu_log("\n");
            }
#endif
            count++;
        }
    }
    /* dispatch on CPU mode: 64-bit, protected or real */
    if (env->cr[0] & CR0_PE_MASK) {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 0);
#endif
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            do_interrupt64(intno, is_int, error_code, next_eip, is_hw);
        } else
#endif
        {
            do_interrupt_protected(intno, is_int, error_code, next_eip, is_hw);
        }
    } else {
#if !defined(CONFIG_USER_ONLY)
        if (env->hflags & HF_SVMI_MASK)
            handle_even_inj(intno, is_int, error_code, is_hw, 1);
#endif
        do_interrupt_real(intno, is_int, error_code, next_eip);
    }

    /* SVM: event delivery completed, clear the injection-valid bit */
#if !defined(CONFIG_USER_ONLY)
    if (env->hflags & HF_SVMI_MASK) {
        uint32_t event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), event_inj & ~SVM_EVTINJ_VALID);
    }
#endif
}
/* Deliver the exception currently pending in env1 (exception_index,
   error_code, ...).  Temporarily installs env1 as the global env used
   by the helpers. */
void do_interrupt(CPUX86State *env1)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
#if defined(CONFIG_USER_ONLY)
    /* if user mode only, we simulate a fake exception
       which will be handled outside the cpu execution
       loop */
    do_interrupt_user(env->exception_index,
                      env->exception_is_int,
                      env->error_code,
                      env->exception_next_eip);
    /* successfully delivered */
    env->old_exception = -1;
#else
    /* simulate a real cpu exception. On i386, it can
       trigger new exceptions, but we do not handle
       double or triple faults yet. */
    do_interrupt_all(env->exception_index,
                     env->exception_is_int,
                     env->error_code,
                     env->exception_next_eip, 0);
    /* successfully delivered */
    env->old_exception = -1;
#endif
    env = saved_env;
}
1409 void do_interrupt_x86_hardirq(CPUX86State *env1, int intno, int is_hw)
1411 CPUX86State *saved_env;
1413 saved_env = env;
1414 env = env1;
1415 do_interrupt_all(intno, 0, 0, 0, is_hw);
1416 env = saved_env;
1419 /* This should come from sysemu.h - if we could include it here... */
1420 void qemu_system_reset_request(void);
/*
 * Check nested exceptions and change to double or triple fault if
 * needed. It should only be called, if this is not an interrupt.
 * Returns the new exception number.
 */
static int check_exception(int intno, int *error_code)
{
    /* "contributory" exceptions per the SDM: #DE (0) and #TS..#GP (10-13) */
    int first_contributory = env->old_exception == 0 ||
                             (env->old_exception >= 10 &&
                              env->old_exception <= 13);
    int second_contributory = intno == 0 ||
                              (intno >= 10 && intno <= 13);

    qemu_log_mask(CPU_LOG_INT, "check_exception old: 0x%x new 0x%x\n",
                  env->old_exception, intno);

#if !defined(CONFIG_USER_ONLY)
    /* a fault while delivering #DF is a triple fault: shutdown/reset */
    if (env->old_exception == EXCP08_DBLE) {
        if (env->hflags & HF_SVMI_MASK)
            helper_vmexit(SVM_EXIT_SHUTDOWN, 0); /* does not return */

        qemu_log_mask(CPU_LOG_RESET, "Triple fault\n");

        qemu_system_reset_request();
        return EXCP_HLT;
    }
#endif

    /* two contributory faults, or a fault during page-fault delivery,
       escalate to #DF with a zero error code */
    if ((first_contributory && second_contributory)
        || (env->old_exception == EXCP0E_PAGE &&
            (second_contributory || (intno == EXCP0E_PAGE)))) {
        intno = EXCP08_DBLE;
        *error_code = 0;
    }

    if (second_contributory || (intno == EXCP0E_PAGE) ||
        (intno == EXCP08_DBLE))
        env->old_exception = intno;

    return intno;
}
/*
 * Signal an interruption. It is executed in the main CPU loop.
 * is_int is TRUE if coming from the int instruction. next_eip is the
 * EIP value AFTER the interrupt instruction. It is only relevant if
 * is_int is TRUE.
 */
static void QEMU_NORETURN raise_interrupt(int intno, int is_int, int error_code,
                                          int next_eip_addend)
{
    if (!is_int) {
        /* exceptions may be intercepted by SVM and may escalate to #DF */
        helper_svm_check_intercept_param(SVM_EXIT_EXCP_BASE + intno, error_code);
        intno = check_exception(intno, &error_code);
    } else {
        helper_svm_check_intercept_param(SVM_EXIT_SWINT, 0);
    }

    env->exception_index = intno;
    env->error_code = error_code;
    env->exception_is_int = is_int;
    env->exception_next_eip = env->eip + next_eip_addend;
    cpu_loop_exit(env);
}
/* shortcuts to generate exceptions */

/* Raise an exception with an error code; never returns. */
static void QEMU_NORETURN raise_exception_err(int exception_index,
                                              int error_code)
{
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Entry point for callers outside this file: install nenv as the
   global env, then raise the exception; never returns. */
void raise_exception_err_env(CPUX86State *nenv, int exception_index,
                             int error_code)
{
    env = nenv;
    raise_interrupt(exception_index, 0, error_code, 0);
}
/* Raise an exception without an error code; never returns. */
static void QEMU_NORETURN raise_exception(int exception_index)
{
    raise_interrupt(exception_index, 0, 0, 0);
}
/* External entry point: install nenv as the global env, then raise the
   exception (no error code); never returns. */
void raise_exception_env(int exception_index, CPUX86State *nenv)
{
    env = nenv;
    raise_exception(exception_index);
}
1512 /* SMM support */
1514 #if defined(CONFIG_USER_ONLY)
1516 void do_smm_enter(CPUX86State *env1)
1520 void helper_rsm(void)
1524 #else
1526 #ifdef TARGET_X86_64
1527 #define SMM_REVISION_ID 0x00020064
1528 #else
1529 #define SMM_REVISION_ID 0x00020000
1530 #endif
/* Enter System Management Mode: save the CPU state into the SMRAM
   state-save area at smbase + 0x8000, then reset the CPU into the SMM
   execution environment (real-mode-like, CS based at smbase, EIP
   0x8000).  The 64-bit (AMD64) and legacy 32-bit save layouts differ. */
void do_smm_enter(CPUX86State *env1)
{
    target_ulong sm_state;
    SegmentCache *dt;
    int i, offset;
    CPUX86State *saved_env;

    /* called with a foreign env pointer: swap in the global */
    saved_env = env;
    env = env1;

    qemu_log_mask(CPU_LOG_INT, "SMM: enter\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);

    env->hflags |= HF_SMM_MASK;
    cpu_smm_update(env);

    sm_state = env->smbase + 0x8000;

#ifdef TARGET_X86_64
    /* segment registers: selector, attributes, limit, base */
    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        offset = 0x7e00 + i * 16;
        stw_phys(sm_state + offset, dt->selector);
        stw_phys(sm_state + offset + 2, (dt->flags >> 8) & 0xf0ff);
        stl_phys(sm_state + offset + 4, dt->limit);
        stq_phys(sm_state + offset + 8, dt->base);
    }

    stq_phys(sm_state + 0x7e68, env->gdt.base);
    stl_phys(sm_state + 0x7e64, env->gdt.limit);

    stw_phys(sm_state + 0x7e70, env->ldt.selector);
    stq_phys(sm_state + 0x7e78, env->ldt.base);
    stl_phys(sm_state + 0x7e74, env->ldt.limit);
    stw_phys(sm_state + 0x7e72, (env->ldt.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7e88, env->idt.base);
    stl_phys(sm_state + 0x7e84, env->idt.limit);

    stw_phys(sm_state + 0x7e90, env->tr.selector);
    stq_phys(sm_state + 0x7e98, env->tr.base);
    stl_phys(sm_state + 0x7e94, env->tr.limit);
    stw_phys(sm_state + 0x7e92, (env->tr.flags >> 8) & 0xf0ff);

    stq_phys(sm_state + 0x7ed0, env->efer);

    /* general purpose registers */
    stq_phys(sm_state + 0x7ff8, EAX);
    stq_phys(sm_state + 0x7ff0, ECX);
    stq_phys(sm_state + 0x7fe8, EDX);
    stq_phys(sm_state + 0x7fe0, EBX);
    stq_phys(sm_state + 0x7fd8, ESP);
    stq_phys(sm_state + 0x7fd0, EBP);
    stq_phys(sm_state + 0x7fc8, ESI);
    stq_phys(sm_state + 0x7fc0, EDI);
    for(i = 8; i < 16; i++)
        stq_phys(sm_state + 0x7ff8 - i * 8, env->regs[i]);
    stq_phys(sm_state + 0x7f78, env->eip);
    stl_phys(sm_state + 0x7f70, compute_eflags());
    stl_phys(sm_state + 0x7f68, env->dr[6]);
    stl_phys(sm_state + 0x7f60, env->dr[7]);

    stl_phys(sm_state + 0x7f48, env->cr[4]);
    stl_phys(sm_state + 0x7f50, env->cr[3]);
    stl_phys(sm_state + 0x7f58, env->cr[0]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7f00, env->smbase);
#else
    /* legacy 32-bit state-save layout */
    stl_phys(sm_state + 0x7ffc, env->cr[0]);
    stl_phys(sm_state + 0x7ff8, env->cr[3]);
    stl_phys(sm_state + 0x7ff4, compute_eflags());
    stl_phys(sm_state + 0x7ff0, env->eip);
    stl_phys(sm_state + 0x7fec, EDI);
    stl_phys(sm_state + 0x7fe8, ESI);
    stl_phys(sm_state + 0x7fe4, EBP);
    stl_phys(sm_state + 0x7fe0, ESP);
    stl_phys(sm_state + 0x7fdc, EBX);
    stl_phys(sm_state + 0x7fd8, EDX);
    stl_phys(sm_state + 0x7fd4, ECX);
    stl_phys(sm_state + 0x7fd0, EAX);
    stl_phys(sm_state + 0x7fcc, env->dr[6]);
    stl_phys(sm_state + 0x7fc8, env->dr[7]);

    stl_phys(sm_state + 0x7fc4, env->tr.selector);
    stl_phys(sm_state + 0x7f64, env->tr.base);
    stl_phys(sm_state + 0x7f60, env->tr.limit);
    stl_phys(sm_state + 0x7f5c, (env->tr.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7fc0, env->ldt.selector);
    stl_phys(sm_state + 0x7f80, env->ldt.base);
    stl_phys(sm_state + 0x7f7c, env->ldt.limit);
    stl_phys(sm_state + 0x7f78, (env->ldt.flags >> 8) & 0xf0ff);

    stl_phys(sm_state + 0x7f74, env->gdt.base);
    stl_phys(sm_state + 0x7f70, env->gdt.limit);

    stl_phys(sm_state + 0x7f58, env->idt.base);
    stl_phys(sm_state + 0x7f54, env->idt.limit);

    for(i = 0; i < 6; i++) {
        dt = &env->segs[i];
        /* CS/DS/ES descriptors and SS/FS/GS live in two distinct areas */
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        stl_phys(sm_state + 0x7fa8 + i * 4, dt->selector);
        stl_phys(sm_state + offset + 8, dt->base);
        stl_phys(sm_state + offset + 4, dt->limit);
        stl_phys(sm_state + offset, (dt->flags >> 8) & 0xf0ff);
    }
    stl_phys(sm_state + 0x7f14, env->cr[4]);

    stl_phys(sm_state + 0x7efc, SMM_REVISION_ID);
    stl_phys(sm_state + 0x7ef8, env->smbase);
#endif
    /* init SMM cpu state */

#ifdef TARGET_X86_64
    cpu_load_efer(env, 0);
#endif
    load_eflags(0, ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = 0x00008000;
    cpu_x86_load_seg_cache(env, R_CS, (env->smbase >> 4) & 0xffff, env->smbase,
                           0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_DS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_ES, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_SS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_FS, 0, 0, 0xffffffff, 0);
    cpu_x86_load_seg_cache(env, R_GS, 0, 0, 0xffffffff, 0);

    cpu_x86_update_cr0(env,
                       env->cr[0] & ~(CR0_PE_MASK | CR0_EM_MASK | CR0_TS_MASK | CR0_PG_MASK));
    cpu_x86_update_cr4(env, 0);
    env->dr[7] = 0x00000400;
    CC_OP = CC_OP_EFLAGS;
    env = saved_env;
}
/* Return from System Management Mode: restore the CPU state that
   do_smm_enter() saved in the SMRAM state-save area.  SMBASE is only
   reloaded when the saved revision ID advertises SMBASE relocation
   (bit 17). */
void helper_rsm(void)
{
    target_ulong sm_state;
    int i, offset;
    uint32_t val;

    sm_state = env->smbase + 0x8000;
#ifdef TARGET_X86_64
    cpu_load_efer(env, ldq_phys(sm_state + 0x7ed0));

    /* segment registers: selector, base, limit, attributes */
    for(i = 0; i < 6; i++) {
        offset = 0x7e00 + i * 16;
        cpu_x86_load_seg_cache(env, i,
                               lduw_phys(sm_state + offset),
                               ldq_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (lduw_phys(sm_state + offset + 2) & 0xf0ff) << 8);
    }

    env->gdt.base = ldq_phys(sm_state + 0x7e68);
    env->gdt.limit = ldl_phys(sm_state + 0x7e64);

    env->ldt.selector = lduw_phys(sm_state + 0x7e70);
    env->ldt.base = ldq_phys(sm_state + 0x7e78);
    env->ldt.limit = ldl_phys(sm_state + 0x7e74);
    env->ldt.flags = (lduw_phys(sm_state + 0x7e72) & 0xf0ff) << 8;

    env->idt.base = ldq_phys(sm_state + 0x7e88);
    env->idt.limit = ldl_phys(sm_state + 0x7e84);

    env->tr.selector = lduw_phys(sm_state + 0x7e90);
    env->tr.base = ldq_phys(sm_state + 0x7e98);
    env->tr.limit = ldl_phys(sm_state + 0x7e94);
    env->tr.flags = (lduw_phys(sm_state + 0x7e92) & 0xf0ff) << 8;

    EAX = ldq_phys(sm_state + 0x7ff8);
    ECX = ldq_phys(sm_state + 0x7ff0);
    EDX = ldq_phys(sm_state + 0x7fe8);
    EBX = ldq_phys(sm_state + 0x7fe0);
    ESP = ldq_phys(sm_state + 0x7fd8);
    EBP = ldq_phys(sm_state + 0x7fd0);
    ESI = ldq_phys(sm_state + 0x7fc8);
    EDI = ldq_phys(sm_state + 0x7fc0);
    for(i = 8; i < 16; i++)
        env->regs[i] = ldq_phys(sm_state + 0x7ff8 - i * 8);
    env->eip = ldq_phys(sm_state + 0x7f78);
    load_eflags(ldl_phys(sm_state + 0x7f70),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->dr[6] = ldl_phys(sm_state + 0x7f68);
    env->dr[7] = ldl_phys(sm_state + 0x7f60);

    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f48));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7f50));
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7f58));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7f00) & ~0x7fff;
    }
#else
    cpu_x86_update_cr0(env, ldl_phys(sm_state + 0x7ffc));
    cpu_x86_update_cr3(env, ldl_phys(sm_state + 0x7ff8));
    load_eflags(ldl_phys(sm_state + 0x7ff4),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    env->eip = ldl_phys(sm_state + 0x7ff0);
    EDI = ldl_phys(sm_state + 0x7fec);
    ESI = ldl_phys(sm_state + 0x7fe8);
    EBP = ldl_phys(sm_state + 0x7fe4);
    ESP = ldl_phys(sm_state + 0x7fe0);
    EBX = ldl_phys(sm_state + 0x7fdc);
    EDX = ldl_phys(sm_state + 0x7fd8);
    ECX = ldl_phys(sm_state + 0x7fd4);
    EAX = ldl_phys(sm_state + 0x7fd0);
    env->dr[6] = ldl_phys(sm_state + 0x7fcc);
    env->dr[7] = ldl_phys(sm_state + 0x7fc8);

    env->tr.selector = ldl_phys(sm_state + 0x7fc4) & 0xffff;
    env->tr.base = ldl_phys(sm_state + 0x7f64);
    env->tr.limit = ldl_phys(sm_state + 0x7f60);
    env->tr.flags = (ldl_phys(sm_state + 0x7f5c) & 0xf0ff) << 8;

    env->ldt.selector = ldl_phys(sm_state + 0x7fc0) & 0xffff;
    env->ldt.base = ldl_phys(sm_state + 0x7f80);
    env->ldt.limit = ldl_phys(sm_state + 0x7f7c);
    env->ldt.flags = (ldl_phys(sm_state + 0x7f78) & 0xf0ff) << 8;

    env->gdt.base = ldl_phys(sm_state + 0x7f74);
    env->gdt.limit = ldl_phys(sm_state + 0x7f70);

    env->idt.base = ldl_phys(sm_state + 0x7f58);
    env->idt.limit = ldl_phys(sm_state + 0x7f54);

    for(i = 0; i < 6; i++) {
        /* CS/DS/ES descriptors and SS/FS/GS live in two distinct areas */
        if (i < 3)
            offset = 0x7f84 + i * 12;
        else
            offset = 0x7f2c + (i - 3) * 12;
        cpu_x86_load_seg_cache(env, i,
                               ldl_phys(sm_state + 0x7fa8 + i * 4) & 0xffff,
                               ldl_phys(sm_state + offset + 8),
                               ldl_phys(sm_state + offset + 4),
                               (ldl_phys(sm_state + offset) & 0xf0ff) << 8);
    }
    cpu_x86_update_cr4(env, ldl_phys(sm_state + 0x7f14));

    val = ldl_phys(sm_state + 0x7efc); /* revision ID */
    if (val & 0x20000) {
        env->smbase = ldl_phys(sm_state + 0x7ef8) & ~0x7fff;
    }
#endif
    CC_OP = CC_OP_EFLAGS;
    env->hflags &= ~HF_SMM_MASK;
    cpu_smm_update(env);

    qemu_log_mask(CPU_LOG_INT, "SMM: after RSM\n");
    log_cpu_state_mask(CPU_LOG_INT, env, X86_DUMP_CCOP);
}
1788 #endif /* !CONFIG_USER_ONLY */
1791 /* division, flags are undefined */
1793 void helper_divb_AL(target_ulong t0)
1795 unsigned int num, den, q, r;
1797 num = (EAX & 0xffff);
1798 den = (t0 & 0xff);
1799 if (den == 0) {
1800 raise_exception(EXCP00_DIVZ);
1802 q = (num / den);
1803 if (q > 0xff)
1804 raise_exception(EXCP00_DIVZ);
1805 q &= 0xff;
1806 r = (num % den) & 0xff;
1807 EAX = (EAX & ~0xffff) | (r << 8) | q;
1810 void helper_idivb_AL(target_ulong t0)
1812 int num, den, q, r;
1814 num = (int16_t)EAX;
1815 den = (int8_t)t0;
1816 if (den == 0) {
1817 raise_exception(EXCP00_DIVZ);
1819 q = (num / den);
1820 if (q != (int8_t)q)
1821 raise_exception(EXCP00_DIVZ);
1822 q &= 0xff;
1823 r = (num % den) & 0xff;
1824 EAX = (EAX & ~0xffff) | (r << 8) | q;
1827 void helper_divw_AX(target_ulong t0)
1829 unsigned int num, den, q, r;
1831 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1832 den = (t0 & 0xffff);
1833 if (den == 0) {
1834 raise_exception(EXCP00_DIVZ);
1836 q = (num / den);
1837 if (q > 0xffff)
1838 raise_exception(EXCP00_DIVZ);
1839 q &= 0xffff;
1840 r = (num % den) & 0xffff;
1841 EAX = (EAX & ~0xffff) | q;
1842 EDX = (EDX & ~0xffff) | r;
1845 void helper_idivw_AX(target_ulong t0)
1847 int num, den, q, r;
1849 num = (EAX & 0xffff) | ((EDX & 0xffff) << 16);
1850 den = (int16_t)t0;
1851 if (den == 0) {
1852 raise_exception(EXCP00_DIVZ);
1854 q = (num / den);
1855 if (q != (int16_t)q)
1856 raise_exception(EXCP00_DIVZ);
1857 q &= 0xffff;
1858 r = (num % den) & 0xffff;
1859 EAX = (EAX & ~0xffff) | q;
1860 EDX = (EDX & ~0xffff) | r;
1863 void helper_divl_EAX(target_ulong t0)
1865 unsigned int den, r;
1866 uint64_t num, q;
1868 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1869 den = t0;
1870 if (den == 0) {
1871 raise_exception(EXCP00_DIVZ);
1873 q = (num / den);
1874 r = (num % den);
1875 if (q > 0xffffffff)
1876 raise_exception(EXCP00_DIVZ);
1877 EAX = (uint32_t)q;
1878 EDX = (uint32_t)r;
1881 void helper_idivl_EAX(target_ulong t0)
1883 int den, r;
1884 int64_t num, q;
1886 num = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
1887 den = t0;
1888 if (den == 0) {
1889 raise_exception(EXCP00_DIVZ);
1891 q = (num / den);
1892 r = (num % den);
1893 if (q != (int32_t)q)
1894 raise_exception(EXCP00_DIVZ);
1895 EAX = (uint32_t)q;
1896 EDX = (uint32_t)r;
1899 /* bcd */
1901 /* XXX: exception */
1902 void helper_aam(int base)
1904 int al, ah;
1905 al = EAX & 0xff;
1906 ah = al / base;
1907 al = al % base;
1908 EAX = (EAX & ~0xffff) | al | (ah << 8);
1909 CC_DST = al;
1912 void helper_aad(int base)
1914 int al, ah;
1915 al = EAX & 0xff;
1916 ah = (EAX >> 8) & 0xff;
1917 al = ((ah * base) + al) & 0xff;
1918 EAX = (EAX & ~0xffff) | al;
1919 CC_DST = al;
1922 void helper_aaa(void)
1924 int icarry;
1925 int al, ah, af;
1926 int eflags;
1928 eflags = helper_cc_compute_all(CC_OP);
1929 af = eflags & CC_A;
1930 al = EAX & 0xff;
1931 ah = (EAX >> 8) & 0xff;
1933 icarry = (al > 0xf9);
1934 if (((al & 0x0f) > 9 ) || af) {
1935 al = (al + 6) & 0x0f;
1936 ah = (ah + 1 + icarry) & 0xff;
1937 eflags |= CC_C | CC_A;
1938 } else {
1939 eflags &= ~(CC_C | CC_A);
1940 al &= 0x0f;
1942 EAX = (EAX & ~0xffff) | al | (ah << 8);
1943 CC_SRC = eflags;
1946 void helper_aas(void)
1948 int icarry;
1949 int al, ah, af;
1950 int eflags;
1952 eflags = helper_cc_compute_all(CC_OP);
1953 af = eflags & CC_A;
1954 al = EAX & 0xff;
1955 ah = (EAX >> 8) & 0xff;
1957 icarry = (al < 6);
1958 if (((al & 0x0f) > 9 ) || af) {
1959 al = (al - 6) & 0x0f;
1960 ah = (ah - 1 - icarry) & 0xff;
1961 eflags |= CC_C | CC_A;
1962 } else {
1963 eflags &= ~(CC_C | CC_A);
1964 al &= 0x0f;
1966 EAX = (EAX & ~0xffff) | al | (ah << 8);
1967 CC_SRC = eflags;
/* DAA: decimal adjust AL after addition — fix AL up to packed BCD and
   set CF/AF plus ZF/PF/SF from the result (OF undefined). */
void helper_daa(void)
{
    int old_al, al, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    old_al = al = EAX & 0xff;

    eflags = 0;
    if (((al & 0x0f) > 9 ) || af) {
        al = (al + 6) & 0xff;
        eflags |= CC_A;
    }
    /* the high-nibble test uses the pre-adjustment AL value */
    if ((old_al > 0x99) || cf) {
        al = (al + 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
/* DAS: decimal adjust AL after subtraction — fix AL down to packed BCD
   and set CF/AF plus ZF/PF/SF from the result (OF undefined). */
void helper_das(void)
{
    int al, al1, af, cf;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    cf = eflags & CC_C;
    af = eflags & CC_A;
    al = EAX & 0xff;

    eflags = 0;
    al1 = al;           /* remember pre-adjustment AL for the 0x99 test */
    if (((al & 0x0f) > 9 ) || af) {
        eflags |= CC_A;
        /* the subtraction of 6 borrows when AL < 6 */
        if (al < 6 || cf)
            eflags |= CC_C;
        al = (al - 6) & 0xff;
    }
    if ((al1 > 0x99) || cf) {
        al = (al - 0x60) & 0xff;
        eflags |= CC_C;
    }
    EAX = (EAX & ~0xff) | al;
    /* well, speed is not an issue here, so we compute the flags by hand */
    eflags |= (al == 0) << 6; /* zf */
    eflags |= parity_table[al]; /* pf */
    eflags |= (al & 0x80); /* sf */
    CC_SRC = eflags;
}
2027 void helper_into(int next_eip_addend)
2029 int eflags;
2030 eflags = helper_cc_compute_all(CC_OP);
2031 if (eflags & CC_O) {
2032 raise_interrupt(EXCP04_INTO, 1, 0, next_eip_addend);
/* CMPXCHG8B: compare EDX:EAX against the 64-bit value at [a0].  On
   match store ECX:EBX and set ZF, otherwise load the memory value into
   EDX:EAX and clear ZF.  The store is always performed, matching real
   hardware behaviour. */
void helper_cmpxchg8b(target_ulong a0)
{
    uint64_t d;
    int eflags;

    eflags = helper_cc_compute_all(CC_OP);
    d = ldq(a0);
    if (d == (((uint64_t)EDX << 32) | (uint32_t)EAX)) {
        stq(a0, ((uint64_t)ECX << 32) | (uint32_t)EBX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d);
        EDX = (uint32_t)(d >> 32);
        EAX = (uint32_t)d;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2056 #ifdef TARGET_X86_64
/* CMPXCHG16B: compare RDX:RAX against the 128-bit value at [a0].  On
   match store RCX:RBX and set ZF, otherwise load the memory value into
   RDX:RAX and clear ZF.  Raises #GP on a misaligned operand; the store
   is always performed. */
void helper_cmpxchg16b(target_ulong a0)
{
    uint64_t d0, d1;
    int eflags;

    /* operand must be 16-byte aligned */
    if ((a0 & 0xf) != 0)
        raise_exception(EXCP0D_GPF);
    eflags = helper_cc_compute_all(CC_OP);
    d0 = ldq(a0);
    d1 = ldq(a0 + 8);
    if (d0 == EAX && d1 == EDX) {
        stq(a0, EBX);
        stq(a0 + 8, ECX);
        eflags |= CC_Z;
    } else {
        /* always do the store */
        stq(a0, d0);
        stq(a0 + 8, d1);
        EDX = d1;
        EAX = d0;
        eflags &= ~CC_Z;
    }
    CC_SRC = eflags;
}
2081 #endif
/* Raise #DB for single stepping; in system emulation also re-evaluate
   hardware breakpoints and set DR6.BS first. */
void helper_single_step(void)
{
#ifndef CONFIG_USER_ONLY
    check_hw_breakpoints(env, 1);
    env->dr[6] |= DR6_BS;
#endif
    raise_exception(EXCP01_DB);
}
/* CPUID: fill EAX/EBX/ECX/EDX for the leaf selected by EAX (and
   sub-leaf ECX), after the SVM intercept check. */
void helper_cpuid(void)
{
    uint32_t eax, ebx, ecx, edx;

    helper_svm_check_intercept_param(SVM_EXIT_CPUID, 0);

    cpu_x86_cpuid(env, (uint32_t)EAX, (uint32_t)ECX, &eax, &ebx, &ecx, &edx);
    EAX = eax;
    EBX = ebx;
    ECX = ecx;
    EDX = edx;
}
/* ENTER with a non-zero nesting level: copy 'level - 1' previous frame
   pointers from the old frame and push the new frame pointer t1.
   All stack accesses are masked by the SS segment size. */
void helper_enter_level(int level, int data32, target_ulong t1)
{
    target_ulong ssp;
    uint32_t esp_mask, esp, ebp;

    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    ebp = EBP;
    esp = ESP;
    if (data32) {
        /* 32 bit */
        esp -= 4;
        while (--level) {
            esp -= 4;
            ebp -= 4;
            stl(ssp + (esp & esp_mask), ldl(ssp + (ebp & esp_mask)));
        }
        esp -= 4;
        stl(ssp + (esp & esp_mask), t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(ssp + (esp & esp_mask), lduw(ssp + (ebp & esp_mask)));
        }
        esp -= 2;
        stw(ssp + (esp & esp_mask), t1);
    }
}
2137 #ifdef TARGET_X86_64
/* 64-bit ENTER with a non-zero nesting level: like helper_enter_level
   but with flat 64-bit addressing (no SS base/mask). */
void helper_enter64_level(int level, int data64, target_ulong t1)
{
    target_ulong esp, ebp;
    ebp = EBP;
    esp = ESP;

    if (data64) {
        /* 64 bit */
        esp -= 8;
        while (--level) {
            esp -= 8;
            ebp -= 8;
            stq(esp, ldq(ebp));
        }
        esp -= 8;
        stq(esp, t1);
    } else {
        /* 16 bit */
        esp -= 2;
        while (--level) {
            esp -= 2;
            ebp -= 2;
            stw(esp, lduw(ebp));
        }
        esp -= 2;
        stw(esp, t1);
    }
}
2166 #endif
/* LLDT: load the LDT register from a GDT selector.  A null selector
   disables the LDT; otherwise the descriptor must be a present LDT
   descriptor in the GDT (16 bytes in long mode). */
void helper_lldt(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* XXX: NULL selector case: invalid LDT */
        env->ldt.base = 0;
        env->ldt.limit = 0;
    } else {
        /* the selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        /* must be a system descriptor of type 2 (LDT) */
        if ((e2 & DESC_S_MASK) || ((e2 >> DESC_TYPE_SHIFT) & 0xf) != 2)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3;
            /* long mode: third word holds base bits 63..32 */
            e3 = ldl_kernel(ptr + 8);
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
            env->ldt.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->ldt, e1, e2);
        }
    }
    env->ldt.selector = selector;
}
/* LTR: load the task register from a GDT selector.  A null selector
   clears TR; otherwise the descriptor must be an available TSS
   (16- or 32-bit) and is marked busy afterwards. */
void helper_ltr(int selector)
{
    SegmentCache *dt;
    uint32_t e1, e2;
    int index, type, entry_limit;
    target_ulong ptr;

    selector &= 0xffff;
    if ((selector & 0xfffc) == 0) {
        /* NULL selector case: invalid TR */
        env->tr.base = 0;
        env->tr.limit = 0;
        env->tr.flags = 0;
    } else {
        /* the selector must reference the GDT (TI bit clear) */
        if (selector & 0x4)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dt = &env->gdt;
        index = selector & ~7;
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK)
            entry_limit = 15;
        else
#endif
            entry_limit = 7;
        if ((index + entry_limit) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        /* must be an available 16-bit (1) or 32-bit (9) TSS */
        if ((e2 & DESC_S_MASK) ||
            (type != 1 && type != 9))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
#ifdef TARGET_X86_64
        if (env->hflags & HF_LMA_MASK) {
            uint32_t e3, e4;
            /* long mode: 16-byte descriptor, upper word must be zero-typed */
            e3 = ldl_kernel(ptr + 8);
            e4 = ldl_kernel(ptr + 12);
            if ((e4 >> DESC_TYPE_SHIFT) & 0xf)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            load_seg_cache_raw_dt(&env->tr, e1, e2);
            env->tr.base |= (target_ulong)e3 << 32;
        } else
#endif
        {
            load_seg_cache_raw_dt(&env->tr, e1, e2);
        }
        /* mark the TSS busy in the descriptor */
        e2 |= DESC_TSS_BUSY_MASK;
        stl_kernel(ptr + 4, e2);
    }
    env->tr.selector = selector;
}
/* only works if protected mode and not VM86. seg_reg must be != R_CS */
/* Load a data/stack segment register: performs the full protected-mode
   descriptor checks (null selector rules, writability/readability,
   privilege, presence), sets the accessed bit and updates the segment
   cache. */
void helper_load_seg(int seg_reg, int selector)
{
    uint32_t e1, e2;
    int cpl, dpl, rpl;
    SegmentCache *dt;
    int index;
    target_ulong ptr;

    selector &= 0xffff;
    cpl = env->hflags & HF_CPL_MASK;
    if ((selector & 0xfffc) == 0) {
        /* null selector case */
        /* a null SS is only legal for a 64-bit CPL < 3 context */
        if (seg_reg == R_SS
#ifdef TARGET_X86_64
            && (!(env->hflags & HF_CS64_MASK) || cpl == 3)
#endif
            )
            raise_exception_err(EXCP0D_GPF, 0);
        cpu_x86_load_seg_cache(env, seg_reg, selector, 0, 0, 0);
    } else {

        if (selector & 0x4)
            dt = &env->ldt;
        else
            dt = &env->gdt;
        index = selector & ~7;
        if ((index + 7) > dt->limit)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        ptr = dt->base + index;
        e1 = ldl_kernel(ptr);
        e2 = ldl_kernel(ptr + 4);

        /* system descriptors cannot be loaded into data segment registers */
        if (!(e2 & DESC_S_MASK))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        rpl = selector & 3;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (seg_reg == R_SS) {
            /* must be writable segment */
            if ((e2 & DESC_CS_MASK) || !(e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            if (rpl != cpl || dpl != cpl)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        } else {
            /* must be readable segment */
            if ((e2 & (DESC_CS_MASK | DESC_R_MASK)) == DESC_CS_MASK)
                raise_exception_err(EXCP0D_GPF, selector & 0xfffc);

            if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
                /* if not conforming code, test rights */
                if (dpl < cpl || dpl < rpl)
                    raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
            }
        }

        /* not-present SS faults with #SS, other segments with #NP */
        if (!(e2 & DESC_P_MASK)) {
            if (seg_reg == R_SS)
                raise_exception_err(EXCP0C_STACK, selector & 0xfffc);
            else
                raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);
        }

        /* set the access bit if not already set */
        if (!(e2 & DESC_A_MASK)) {
            e2 |= DESC_A_MASK;
            stl_kernel(ptr + 4, e2);
        }

        cpu_x86_load_seg_cache(env, seg_reg, selector,
                               get_seg_base(e1, e2),
                               get_seg_limit(e1, e2),
                               e2);
#if 0
        qemu_log("load_seg: sel=0x%04x base=0x%08lx limit=0x%08lx flags=%08x\n",
                 selector, (unsigned long)sc->base, sc->limit, sc->flags);
#endif
    }
}
/* protected mode jump */
/*
 * Far JMP in protected mode: direct jump to a code segment, or indirect
 * transfer through a TSS / task gate / call gate.  next_eip_addend lets us
 * compute the return EIP for a task switch without the translator passing
 * the absolute address.
 */
void helper_ljmp_protected(int new_cs, target_ulong new_eip,
                           int next_eip_addend)
{
    int gate_cs, type;
    uint32_t e1, e2, cpl, dpl, rpl, limit;
    target_ulong next_eip;

    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    if (e2 & DESC_S_MASK) {
        /* direct jump: target must be a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        limit = get_seg_limit(e1, e2);
        /* limit check is skipped for 64-bit code segments (flat) */
        if (new_eip > limit &&
            !(env->hflags & HF_LMA_MASK) && !(e2 & DESC_L_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* RPL of the loaded CS is forced to the current CPL */
        cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                       get_seg_base(e1, e2), limit, e2);
        EIP = new_eip;
    } else {
        /* jump to call or task gate */
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        cpl = env->hflags & HF_CPL_MASK;
        type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
        switch(type) {
        case 1: /* 286 TSS */
        case 9: /* 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            next_eip = env->eip + next_eip_addend;
            switch_tss(new_cs, e1, e2, SWITCH_TSS_JMP, next_eip);
            /* switch_tss loaded EFLAGS from the TSS; resync lazy CC state */
            CC_OP = CC_OP_EFLAGS;
            break;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            if ((dpl < cpl) || (dpl < rpl))
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
            gate_cs = e1 >> 16;
            new_eip = (e1 & 0xffff);
            if (type == 12)
                new_eip |= (e2 & 0xffff0000); /* 386 gate: 32-bit offset */
            if (load_segment(&e1, &e2, gate_cs) != 0)
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            dpl = (e2 >> DESC_DPL_SHIFT) & 3;
            /* must be code segment */
            if (((e2 & (DESC_S_MASK | DESC_CS_MASK)) !=
                 (DESC_S_MASK | DESC_CS_MASK)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (((e2 & DESC_C_MASK) && (dpl > cpl)) ||
                (!(e2 & DESC_C_MASK) && (dpl != cpl)))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            if (!(e2 & DESC_P_MASK))
                raise_exception_err(EXCP0D_GPF, gate_cs & 0xfffc);
            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, 0);
            cpu_x86_load_seg_cache(env, R_CS, (gate_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
    }
}
/* real mode call */
/*
 * Far CALL in real/VM86 mode: push return CS:IP on the stack (32-bit or
 * 16-bit pushes depending on operand size 'shift') and reload CS:IP.
 * No descriptor checks apply; CS base is simply selector << 4.
 */
void helper_lcall_real(int new_cs, target_ulong new_eip1,
                       int shift, int next_eip)
{
    int new_eip;
    uint32_t esp, esp_mask;
    target_ulong ssp;

    new_eip = new_eip1;
    esp = ESP;
    esp_mask = get_sp_mask(env->segs[R_SS].flags);
    ssp = env->segs[R_SS].base;
    if (shift) {
        /* 32-bit operand size */
        PUSHL(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHL(ssp, esp, esp_mask, next_eip);
    } else {
        /* 16-bit operand size */
        PUSHW(ssp, esp, esp_mask, env->segs[R_CS].selector);
        PUSHW(ssp, esp, esp_mask, next_eip);
    }

    SET_ESP(esp, esp_mask);
    env->eip = new_eip;
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
}
/* protected mode call */
/*
 * Far CALL in protected mode.  Three paths:
 *  - direct call to a code segment (same privilege),
 *  - task switch through a TSS/task gate,
 *  - call gate, possibly switching to an inner-privilege stack, in which
 *    case param_count stack words are copied from the old stack to the new.
 * All checks precede the "not restartable" points so that a fault leaves
 * guest state unmodified.
 */
void helper_lcall_protected(int new_cs, target_ulong new_eip,
                            int shift, int next_eip_addend)
{
    int new_stack, i;
    uint32_t e1, e2, cpl, dpl, rpl, selector, offset, param_count;
    uint32_t ss = 0, ss_e1 = 0, ss_e2 = 0, sp, type, ss_dpl, sp_mask;
    uint32_t val, limit, old_sp_mask;
    target_ulong ssp, old_ssp, next_eip;

    next_eip = env->eip + next_eip_addend;
    LOG_PCALL("lcall %04x:%08x s=%d\n", new_cs, (uint32_t)new_eip, shift);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, 0);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    LOG_PCALL("desc=%08x:%08x\n", e1, e2);
    if (e2 & DESC_S_MASK) {
        /* direct call to a code segment */
        if (!(e2 & DESC_CS_MASK))
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (e2 & DESC_C_MASK) {
            /* conforming code segment */
            if (dpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        } else {
            /* non conforming code segment */
            rpl = new_cs & 3;
            if (rpl > cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            if (dpl != cpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        }
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

#ifdef TARGET_X86_64
        /* XXX: check 16/32 bit cases in long mode */
        if (shift == 2) {
            target_ulong rsp;
            /* 64 bit case */
            rsp = ESP;
            PUSHQ(rsp, env->segs[R_CS].selector);
            PUSHQ(rsp, next_eip);
            /* from this point, not restartable */
            ESP = rsp;
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2),
                                   get_seg_limit(e1, e2), e2);
            EIP = new_eip;
        } else
#endif
        {
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            if (shift) {
                PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHL(ssp, sp, sp_mask, next_eip);
            } else {
                PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
                PUSHW(ssp, sp, sp_mask, next_eip);
            }

            limit = get_seg_limit(e1, e2);
            if (new_eip > limit)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            /* from this point, not restartable */
            SET_ESP(sp, sp_mask);
            cpu_x86_load_seg_cache(env, R_CS, (new_cs & 0xfffc) | cpl,
                                   get_seg_base(e1, e2), limit, e2);
            EIP = new_eip;
        }
    } else {
        /* check gate type */
        type = (e2 >> DESC_TYPE_SHIFT) & 0x1f;
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        rpl = new_cs & 3;
        switch(type) {
        case 1: /* available 286 TSS */
        case 9: /* available 386 TSS */
        case 5: /* task gate */
            if (dpl < cpl || dpl < rpl)
                raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            switch_tss(new_cs, e1, e2, SWITCH_TSS_CALL, next_eip);
            CC_OP = CC_OP_EFLAGS;
            return;
        case 4: /* 286 call gate */
        case 12: /* 386 call gate */
            break;
        default:
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
            break;
        }
        /* gate type bit 3 distinguishes 16-bit (4) from 32-bit (12) gates */
        shift = type >> 3;

        if (dpl < cpl || dpl < rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
        /* check valid bit */
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);
        selector = e1 >> 16;
        offset = (e2 & 0xffff0000) | (e1 & 0x0000ffff);
        param_count = e2 & 0x1f;
        if ((selector & 0xfffc) == 0)
            raise_exception_err(EXCP0D_GPF, 0);

        /* check the target code segment named by the gate */
        if (load_segment(&e1, &e2, selector) != 0)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_S_MASK) || !(e2 & (DESC_CS_MASK)))
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        dpl = (e2 >> DESC_DPL_SHIFT) & 3;
        if (dpl > cpl)
            raise_exception_err(EXCP0D_GPF, selector & 0xfffc);
        if (!(e2 & DESC_P_MASK))
            raise_exception_err(EXCP0B_NOSEG, selector & 0xfffc);

        if (!(e2 & DESC_C_MASK) && dpl < cpl) {
            /* to inner privilege */
            /* new SS:ESP comes from the current task's TSS */
            get_ss_esp_from_tss(&ss, &sp, dpl);
            LOG_PCALL("new ss:esp=%04x:%08x param_count=%d ESP=" TARGET_FMT_lx "\n",
                      ss, sp, param_count, ESP);
            if ((ss & 0xfffc) == 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if ((ss & 3) != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, ss) != 0)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            ss_dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (ss_dpl != dpl)
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0A_TSS, ss & 0xfffc);

            //            push_size = ((param_count * 2) + 8) << shift;

            old_sp_mask = get_sp_mask(env->segs[R_SS].flags);
            old_ssp = env->segs[R_SS].base;

            sp_mask = get_sp_mask(ss_e2);
            ssp = get_seg_base(ss_e1, ss_e2);
            if (shift) {
                /* push old SS:ESP, then copy param_count dwords across */
                PUSHL(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHL(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = ldl_kernel(old_ssp + ((ESP + i * 4) & old_sp_mask));
                    PUSHL(ssp, sp, sp_mask, val);
                }
            } else {
                /* 16-bit gate: copy words */
                PUSHW(ssp, sp, sp_mask, env->segs[R_SS].selector);
                PUSHW(ssp, sp, sp_mask, ESP);
                for(i = param_count - 1; i >= 0; i--) {
                    val = lduw_kernel(old_ssp + ((ESP + i * 2) & old_sp_mask));
                    PUSHW(ssp, sp, sp_mask, val);
                }
            }
            new_stack = 1;
        } else {
            /* to same privilege */
            sp = ESP;
            sp_mask = get_sp_mask(env->segs[R_SS].flags);
            ssp = env->segs[R_SS].base;
            //            push_size = (4 << shift);
            new_stack = 0;
        }

        /* push return CS:EIP on the (possibly new) stack */
        if (shift) {
            PUSHL(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHL(ssp, sp, sp_mask, next_eip);
        } else {
            PUSHW(ssp, sp, sp_mask, env->segs[R_CS].selector);
            PUSHW(ssp, sp, sp_mask, next_eip);
        }

        /* from this point, not restartable */

        if (new_stack) {
            ss = (ss & ~3) | dpl;
            cpu_x86_load_seg_cache(env, R_SS, ss,
                                   ssp,
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        selector = (selector & ~3) | dpl;
        cpu_x86_load_seg_cache(env, R_CS, selector,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, dpl);
        SET_ESP(sp, sp_mask);
        EIP = offset;
    }
}
/* real and vm86 mode iret */
/*
 * IRET in real or VM86 mode: pop EIP, CS, EFLAGS (32- or 16-bit forms per
 * 'shift') and reload them.  In VM86 mode IOPL is not restorable (it is
 * excluded from eflags_mask).  Also clears the NMI-blocked flag.
 */
void helper_iret_real(int shift)
{
    uint32_t sp, new_cs, new_eip, new_eflags, sp_mask;
    target_ulong ssp;
    int eflags_mask;

    sp_mask = 0xffff; /* XXXX: use SS segment size ? */
    sp = ESP;
    ssp = env->segs[R_SS].base;
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        POPL(ssp, sp, sp_mask, new_eflags);
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        POPW(ssp, sp, sp_mask, new_eflags);
    }
    ESP = (ESP & ~sp_mask) | (sp & sp_mask);
    env->segs[R_CS].selector = new_cs;
    env->segs[R_CS].base = (new_cs << 4);
    env->eip = new_eip;
    if (env->eflags & VM_MASK)
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | RF_MASK | NT_MASK;
    else
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | IF_MASK | IOPL_MASK | RF_MASK | NT_MASK;
    if (shift == 0)
        eflags_mask &= 0xffff; /* 16-bit IRET cannot touch high flags */
    load_eflags(new_eflags, eflags_mask);
    env->hflags2 &= ~HF2_NMI_MASK;
}
/*
 * On a return to outer privilege (RET/IRET), nullify any data segment
 * register whose cached descriptor would let the less-privileged code
 * access it (DPL < new CPL, for data or non-conforming code segments).
 */
static inline void validate_seg(int seg_reg, int cpl)
{
    int dpl;
    uint32_t e2;

    /* XXX: on x86_64, we do not want to nullify FS and GS because
       they may still contain a valid base. I would be interested to
       know how a real x86_64 CPU behaves */
    if ((seg_reg == R_FS || seg_reg == R_GS) &&
        (env->segs[seg_reg].selector & 0xfffc) == 0)
        return;

    e2 = env->segs[seg_reg].flags;
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (!(e2 & DESC_CS_MASK) || !(e2 & DESC_C_MASK)) {
        /* data or non conforming code segment */
        if (dpl < cpl) {
            cpu_x86_load_seg_cache(env, seg_reg, 0, 0, 0, 0);
        }
    }
}
/* protected mode iret */
/*
 * Common implementation of far RET (is_iret == 0) and IRET (is_iret == 1)
 * in protected mode.  'shift' selects operand size: 0 = 16-bit, 1 = 32-bit,
 * 2 = 64-bit.  'addend' is the immediate of RET n, added to SP after the
 * pops.  Handles same-privilege returns, outer-privilege returns (popping
 * a new SS:ESP and re-validating data segments), and IRET back to VM86.
 */
static inline void helper_ret_protected(int shift, int is_iret, int addend)
{
    uint32_t new_cs, new_eflags, new_ss;
    uint32_t new_es, new_ds, new_fs, new_gs;
    uint32_t e1, e2, ss_e1, ss_e2;
    int cpl, dpl, rpl, eflags_mask, iopl;
    target_ulong ssp, sp, new_eip, new_esp, sp_mask;

#ifdef TARGET_X86_64
    if (shift == 2)
        sp_mask = -1; /* 64-bit: no SP masking */
    else
#endif
        sp_mask = get_sp_mask(env->segs[R_SS].flags);
    sp = ESP;
    ssp = env->segs[R_SS].base;
    new_eflags = 0; /* avoid warning */
#ifdef TARGET_X86_64
    if (shift == 2) {
        POPQ(sp, new_eip);
        POPQ(sp, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPQ(sp, new_eflags);
        }
    } else
#endif
    if (shift == 1) {
        /* 32 bits */
        POPL(ssp, sp, sp_mask, new_eip);
        POPL(ssp, sp, sp_mask, new_cs);
        new_cs &= 0xffff;
        if (is_iret) {
            POPL(ssp, sp, sp_mask, new_eflags);
            /* IRET with VM set in the popped EFLAGS returns to VM86 */
            if (new_eflags & VM_MASK)
                goto return_to_vm86;
        }
    } else {
        /* 16 bits */
        POPW(ssp, sp, sp_mask, new_eip);
        POPW(ssp, sp, sp_mask, new_cs);
        if (is_iret)
            POPW(ssp, sp, sp_mask, new_eflags);
    }
    LOG_PCALL("lret new %04x:" TARGET_FMT_lx " s=%d addend=0x%x\n",
              new_cs, new_eip, shift, addend);
    LOG_PCALL_STATE(env);
    if ((new_cs & 0xfffc) == 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (load_segment(&e1, &e2, new_cs) != 0)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    if (!(e2 & DESC_S_MASK) ||
        !(e2 & DESC_CS_MASK))
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    cpl = env->hflags & HF_CPL_MASK;
    rpl = new_cs & 3;
    /* a return may only go to the same or a less privileged level */
    if (rpl < cpl)
        raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    dpl = (e2 >> DESC_DPL_SHIFT) & 3;
    if (e2 & DESC_C_MASK) {
        if (dpl > rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    } else {
        if (dpl != rpl)
            raise_exception_err(EXCP0D_GPF, new_cs & 0xfffc);
    }
    if (!(e2 & DESC_P_MASK))
        raise_exception_err(EXCP0B_NOSEG, new_cs & 0xfffc);

    sp += addend;
    if (rpl == cpl && (!(env->hflags & HF_CS64_MASK) ||
                       ((env->hflags & HF_CS64_MASK) && !is_iret))) {
        /* return to same privilege level */
        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
    } else {
        /* return to different privilege level */
        /* pop the outer stack pointer and selector */
#ifdef TARGET_X86_64
        if (shift == 2) {
            POPQ(sp, new_esp);
            POPQ(sp, new_ss);
            new_ss &= 0xffff;
        } else
#endif
        if (shift == 1) {
            /* 32 bits */
            POPL(ssp, sp, sp_mask, new_esp);
            POPL(ssp, sp, sp_mask, new_ss);
            new_ss &= 0xffff;
        } else {
            /* 16 bits */
            POPW(ssp, sp, sp_mask, new_esp);
            POPW(ssp, sp, sp_mask, new_ss);
        }
        LOG_PCALL("new ss:esp=%04x:" TARGET_FMT_lx "\n",
                    new_ss, new_esp);
        if ((new_ss & 0xfffc) == 0) {
#ifdef TARGET_X86_64
            /* NULL ss is allowed in long mode if cpl != 3*/
            /* XXX: test CS64 ? */
            if ((env->hflags & HF_LMA_MASK) && rpl != 3) {
                cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                       0, 0xffffffff,
                                       DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                                       DESC_S_MASK | (rpl << DESC_DPL_SHIFT) |
                                       DESC_W_MASK | DESC_A_MASK);
                ss_e2 = DESC_B_MASK; /* XXX: should not be needed ? */
            } else
#endif
            {
                raise_exception_err(EXCP0D_GPF, 0);
            }
        } else {
            if ((new_ss & 3) != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (load_segment(&ss_e1, &ss_e2, new_ss) != 0)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_S_MASK) ||
                (ss_e2 & DESC_CS_MASK) ||
                !(ss_e2 & DESC_W_MASK))
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            dpl = (ss_e2 >> DESC_DPL_SHIFT) & 3;
            if (dpl != rpl)
                raise_exception_err(EXCP0D_GPF, new_ss & 0xfffc);
            if (!(ss_e2 & DESC_P_MASK))
                raise_exception_err(EXCP0B_NOSEG, new_ss & 0xfffc);
            cpu_x86_load_seg_cache(env, R_SS, new_ss,
                                   get_seg_base(ss_e1, ss_e2),
                                   get_seg_limit(ss_e1, ss_e2),
                                   ss_e2);
        }

        cpu_x86_load_seg_cache(env, R_CS, new_cs,
                       get_seg_base(e1, e2),
                       get_seg_limit(e1, e2),
                       e2);
        cpu_x86_set_cpl(env, rpl);
        sp = new_esp;
#ifdef TARGET_X86_64
        if (env->hflags & HF_CS64_MASK)
            sp_mask = -1;
        else
#endif
            sp_mask = get_sp_mask(ss_e2);

        /* validate data segments */
        validate_seg(R_ES, rpl);
        validate_seg(R_DS, rpl);
        validate_seg(R_FS, rpl);
        validate_seg(R_GS, rpl);

        sp += addend;
    }
    SET_ESP(sp, sp_mask);
    env->eip = new_eip;
    if (is_iret) {
        /* NOTE: 'cpl' is the _old_ CPL */
        eflags_mask = TF_MASK | AC_MASK | ID_MASK | RF_MASK | NT_MASK;
        if (cpl == 0)
            eflags_mask |= IOPL_MASK;
        iopl = (env->eflags >> IOPL_SHIFT) & 3;
        if (cpl <= iopl)
            eflags_mask |= IF_MASK;
        if (shift == 0)
            eflags_mask &= 0xffff;
        load_eflags(new_eflags, eflags_mask);
    }
    return;

 return_to_vm86:
    /* IRET from protected mode back into VM86: pop ESP, SS and the four
       data segment selectors that were saved on the ring-0 stack */
    POPL(ssp, sp, sp_mask, new_esp);
    POPL(ssp, sp, sp_mask, new_ss);
    POPL(ssp, sp, sp_mask, new_es);
    POPL(ssp, sp, sp_mask, new_ds);
    POPL(ssp, sp, sp_mask, new_fs);
    POPL(ssp, sp, sp_mask, new_gs);

    /* modify processor state */
    load_eflags(new_eflags, TF_MASK | AC_MASK | ID_MASK |
                IF_MASK | IOPL_MASK | VM_MASK | NT_MASK | VIF_MASK | VIP_MASK);
    load_seg_vm(R_CS, new_cs & 0xffff);
    cpu_x86_set_cpl(env, 3);
    load_seg_vm(R_SS, new_ss & 0xffff);
    load_seg_vm(R_ES, new_es & 0xffff);
    load_seg_vm(R_DS, new_ds & 0xffff);
    load_seg_vm(R_FS, new_fs & 0xffff);
    load_seg_vm(R_GS, new_gs & 0xffff);

    env->eip = new_eip & 0xffff;
    ESP = new_esp;
}
/*
 * IRET in protected mode.  If EFLAGS.NT is set, this is a return from a
 * nested task: switch back to the task whose selector is stored in the
 * back-link field (offset 0) of the current TSS.  Otherwise fall through
 * to the common ret/iret path.  Either way, NMI blocking is cleared.
 */
void helper_iret_protected(int shift, int next_eip)
{
    int tss_selector, type;
    uint32_t e1, e2;

    /* specific case for TSS */
    if (env->eflags & NT_MASK) {
#ifdef TARGET_X86_64
        /* task switches are not allowed in long mode */
        if (env->hflags & HF_LMA_MASK)
            raise_exception_err(EXCP0D_GPF, 0);
#endif
        tss_selector = lduw_kernel(env->tr.base + 0);
        if (tss_selector & 4)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        if (load_segment(&e1, &e2, tss_selector) != 0)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        type = (e2 >> DESC_TYPE_SHIFT) & 0x17;
        /* NOTE: we check both segment and busy TSS */
        if (type != 3)
            raise_exception_err(EXCP0A_TSS, tss_selector & 0xfffc);
        switch_tss(tss_selector, e1, e2, SWITCH_TSS_IRET, next_eip);
    } else {
        helper_ret_protected(shift, 1, 0);
    }
    env->hflags2 &= ~HF2_NMI_MASK;
}
/* Far RET (optionally RET n) in protected mode: thin wrapper over the
   common ret/iret path with is_iret == 0. */
void helper_lret_protected(int shift, int addend)
{
    helper_ret_protected(shift, 0, addend);
}
/*
 * SYSENTER: fast transition to ring 0.  CS/SS are loaded with flat
 * descriptors derived from MSR_IA32_SYSENTER_CS (#GP if that MSR is 0);
 * ESP/EIP come from the SYSENTER_ESP/EIP MSRs.
 */
void helper_sysenter(void)
{
    if (env->sysenter_cs == 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    env->eflags &= ~(VM_MASK | IF_MASK | RF_MASK);
    cpu_x86_set_cpl(env, 0);

#ifdef TARGET_X86_64
    if (env->hflags & HF_LMA_MASK) {
        /* long mode: 64-bit code segment (L bit set) */
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
    } else
#endif
    {
        cpu_x86_load_seg_cache(env, R_CS, env->sysenter_cs & 0xfffc,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
    }
    /* SS selector is SYSENTER_CS + 8 by architectural definition */
    cpu_x86_load_seg_cache(env, R_SS, (env->sysenter_cs + 8) & 0xfffc,
                           0, 0xffffffff,
                           DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                           DESC_S_MASK |
                           DESC_W_MASK | DESC_A_MASK);
    ESP = env->sysenter_esp;
    EIP = env->sysenter_eip;
}
/*
 * SYSEXIT: fast return to ring 3.  #GP unless SYSENTER_CS is set and the
 * caller is at CPL 0.  dflag == 2 selects the 64-bit form (different fixed
 * selector offsets from SYSENTER_CS).  New ESP/EIP come from ECX/EDX.
 */
void helper_sysexit(int dflag)
{
    int cpl;

    cpl = env->hflags & HF_CPL_MASK;
    if (env->sysenter_cs == 0 || cpl != 0) {
        raise_exception_err(EXCP0D_GPF, 0);
    }
    cpu_x86_set_cpl(env, 3);
#ifdef TARGET_X86_64
    if (dflag == 2) {
        /* 64-bit: CS = SYSENTER_CS + 32, SS = SYSENTER_CS + 40, RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 32) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK | DESC_L_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 40) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    } else
#endif
    {
        /* legacy: CS = SYSENTER_CS + 16, SS = SYSENTER_CS + 24, RPL 3 */
        cpu_x86_load_seg_cache(env, R_CS, ((env->sysenter_cs + 16) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_CS_MASK | DESC_R_MASK | DESC_A_MASK);
        cpu_x86_load_seg_cache(env, R_SS, ((env->sysenter_cs + 24) & 0xfffc) | 3,
                               0, 0xffffffff,
                               DESC_G_MASK | DESC_B_MASK | DESC_P_MASK |
                               DESC_S_MASK | (3 << DESC_DPL_SHIFT) |
                               DESC_W_MASK | DESC_A_MASK);
    }
    ESP = ECX;
    EIP = EDX;
}
3023 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation stubs: control and debug registers do not exist in
   CONFIG_USER_ONLY, so reads return 0 and writes are ignored. */
target_ulong helper_read_crN(int reg)
{
    return 0;
}

void helper_write_crN(int reg, target_ulong t0)
{
}

void helper_movl_drN_T0(int reg, target_ulong t0)
{
}
3036 #else
/*
 * Read a control register (MOV reg, CRn).  CR8 is the TPR: it comes from
 * the APIC unless a hypervisor virtual interrupt shadow (V_INTR) is
 * active, in which case the shadowed v_tpr is returned.  The SVM
 * intercept check may exit to the guest hypervisor instead.
 */
target_ulong helper_read_crN(int reg)
{
    target_ulong val;

    helper_svm_check_intercept_param(SVM_EXIT_READ_CR0 + reg, 0);
    switch(reg) {
    default:
        val = env->cr[reg];
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            val = cpu_get_apic_tpr(env->apic_state);
        } else {
            val = env->v_tpr;
        }
        break;
    }
    return val;
}
/*
 * Write a control register (MOV CRn, reg).  CR0/CR3/CR4 go through the
 * cpu_x86_update_* helpers because they affect paging/TLB state; CR8
 * updates the APIC TPR (unless shadowed by SVM V_INTR) and the cached
 * v_tpr copy.
 */
void helper_write_crN(int reg, target_ulong t0)
{
    helper_svm_check_intercept_param(SVM_EXIT_WRITE_CR0 + reg, 0);
    switch(reg) {
    case 0:
        cpu_x86_update_cr0(env, t0);
        break;
    case 3:
        cpu_x86_update_cr3(env, t0);
        break;
    case 4:
        cpu_x86_update_cr4(env, t0);
        break;
    case 8:
        if (!(env->hflags2 & HF2_VINTR_MASK)) {
            cpu_set_apic_tpr(env->apic_state, t0);
        }
        env->v_tpr = t0 & 0x0f;
        break;
    default:
        env->cr[reg] = t0;
        break;
    }
}
/*
 * Write a debug register (MOV DRn, reg).  DR0-DR3 hold breakpoint
 * addresses, so the corresponding hardware breakpoint is re-registered;
 * writing DR7 (the control register) re-registers all four.  Other
 * registers (DR6) are stored without side effects.
 */
void helper_movl_drN_T0(int reg, target_ulong t0)
{
    int i;

    if (reg < 4) {
        hw_breakpoint_remove(env, reg);
        env->dr[reg] = t0;
        hw_breakpoint_insert(env, reg);
    } else if (reg == 7) {
        for (i = 0; i < 4; i++)
            hw_breakpoint_remove(env, i);
        env->dr[7] = t0;
        for (i = 0; i < 4; i++)
            hw_breakpoint_insert(env, i);
    } else
        env->dr[reg] = t0;
}
3099 #endif
/* LMSW: load the low 4 bits of CR0 (PE/MP/EM/TS). */
void helper_lmsw(target_ulong t0)
{
    /* only 4 lower bits of CR0 are modified. PE cannot be set to zero
       if already set to one. */
    t0 = (env->cr[0] & ~0xe) | (t0 & 0xf);
    helper_write_crN(0, t0);
}
/* CLTS: clear the Task Switched flag in CR0 and its cached hflags copy. */
void helper_clts(void)
{
    env->cr[0] &= ~CR0_TS_MASK;
    env->hflags &= ~HF_TS_MASK;
}
/* INVLPG: flush the TLB entry for one linear address (SVM-interceptable). */
void helper_invlpg(target_ulong addr)
{
    helper_svm_check_intercept_param(SVM_EXIT_INVLPG, 0);
    tlb_flush_page(env, addr);
}
/*
 * RDTSC: read the time-stamp counter into EDX:EAX.  #GP if CR4.TSD is set
 * and CPL != 0.  tsc_offset is the (virtualization) offset added to the
 * host-derived counter.
 */
void helper_rdtsc(void)
{
    uint64_t val;

    if ((env->cr[4] & CR4_TSD_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDTSC, 0);

    val = cpu_get_tsc(env) + env->tsc_offset;
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
/* RDTSCP: RDTSC plus the TSC_AUX MSR in ECX. */
void helper_rdtscp(void)
{
    helper_rdtsc();
    ECX = (uint32_t)(env->tsc_aux);
}
/*
 * RDPMC: performance-monitoring counters are not emulated; after the
 * privilege and SVM intercept checks this always raises #UD.
 */
void helper_rdpmc(void)
{
    if ((env->cr[4] & CR4_PCE_MASK) && ((env->hflags & HF_CPL_MASK) != 0)) {
        raise_exception(EXCP0D_GPF);
    }
    helper_svm_check_intercept_param(SVM_EXIT_RDPMC, 0);

    /* currently unimplemented */
    qemu_log_mask(LOG_UNIMP, "x86: unimplemented rdpmc\n");
    raise_exception_err(EXCP06_ILLOP, 0);
}
3153 #if defined(CONFIG_USER_ONLY)
/* User-mode emulation stubs: MSRs are inaccessible in CONFIG_USER_ONLY,
   so WRMSR/RDMSR are no-ops. */
void helper_wrmsr(void)
{
}

void helper_rdmsr(void)
{
}
3161 #else
/*
 * WRMSR: write the 64-bit value EDX:EAX to the MSR selected by ECX.
 * Unknown MSRs are silently ignored (see the XXX at the bottom); machine
 * check bank MSRs are handled in the default case by index range.
 */
void helper_wrmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);

    val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        env->sysenter_cs = val & 0xffff;
        break;
    case MSR_IA32_SYSENTER_ESP:
        env->sysenter_esp = val;
        break;
    case MSR_IA32_SYSENTER_EIP:
        env->sysenter_eip = val;
        break;
    case MSR_IA32_APICBASE:
        cpu_set_apic_base(env->apic_state, val);
        break;
    case MSR_EFER:
        {
            /* only bits backed by an advertised CPUID feature are writable */
            uint64_t update_mask;
            update_mask = 0;
            if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
                update_mask |= MSR_EFER_SCE;
            if (env->cpuid_ext2_features & CPUID_EXT2_LM)
                update_mask |= MSR_EFER_LME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            if (env->cpuid_ext2_features & CPUID_EXT2_NX)
                update_mask |= MSR_EFER_NXE;
            if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
                update_mask |= MSR_EFER_SVME;
            if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
                update_mask |= MSR_EFER_FFXSR;
            cpu_load_efer(env, (env->efer & ~update_mask) |
                          (val & update_mask));
        }
        break;
    case MSR_STAR:
        env->star = val;
        break;
    case MSR_PAT:
        env->pat = val;
        break;
    case MSR_VM_HSAVE_PA:
        env->vm_hsave = val;
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        env->lstar = val;
        break;
    case MSR_CSTAR:
        env->cstar = val;
        break;
    case MSR_FMASK:
        env->fmask = val;
        break;
    case MSR_FSBASE:
        env->segs[R_FS].base = val;
        break;
    case MSR_GSBASE:
        env->segs[R_GS].base = val;
        break;
    case MSR_KERNELGSBASE:
        env->kernelgsbase = val;
        break;
#endif
    /* variable-range MTRRs: base/mask MSR pairs, hence the /2 indexing */
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base = val;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask = val;
        break;
    case MSR_MTRRfix64K_00000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix64K_00000] = val;
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1] = val;
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3] = val;
        break;
    case MSR_MTRRdefType:
        env->mtrr_deftype = val;
        break;
    case MSR_MCG_STATUS:
        env->mcg_status = val;
        break;
    case MSR_MCG_CTL:
        /* writable only to all-0s or all-1s, and only when MCG_CTL_P */
        if ((env->mcg_cap & MCG_CTL_P)
            && (val == 0 || val == ~(uint64_t)0))
            env->mcg_ctl = val;
        break;
    case MSR_TSC_AUX:
        env->tsc_aux = val;
        break;
    case MSR_IA32_MISC_ENABLE:
        env->msr_ia32_misc_enable = val;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            if ((offset & 0x3) != 0
                || (val == 0 || val == ~(uint64_t)0))
                env->mce_banks[offset] = val;
            break;
        }
        /* XXX: exception ? */
        break;
    }
}
/*
 * RDMSR: read the MSR selected by ECX into EDX:EAX.  Unknown MSRs read
 * as 0 (see the XXX in the default case); machine check bank MSRs are
 * handled by index range.
 */
void helper_rdmsr(void)
{
    uint64_t val;

    helper_svm_check_intercept_param(SVM_EXIT_MSR, 0);

    switch((uint32_t)ECX) {
    case MSR_IA32_SYSENTER_CS:
        val = env->sysenter_cs;
        break;
    case MSR_IA32_SYSENTER_ESP:
        val = env->sysenter_esp;
        break;
    case MSR_IA32_SYSENTER_EIP:
        val = env->sysenter_eip;
        break;
    case MSR_IA32_APICBASE:
        val = cpu_get_apic_base(env->apic_state);
        break;
    case MSR_EFER:
        val = env->efer;
        break;
    case MSR_STAR:
        val = env->star;
        break;
    case MSR_PAT:
        val = env->pat;
        break;
    case MSR_VM_HSAVE_PA:
        val = env->vm_hsave;
        break;
    case MSR_IA32_PERF_STATUS:
        /* tsc_increment_by_tick */
        val = 1000ULL;
        /* CPU multiplier */
        val |= (((uint64_t)4ULL) << 40);
        break;
#ifdef TARGET_X86_64
    case MSR_LSTAR:
        val = env->lstar;
        break;
    case MSR_CSTAR:
        val = env->cstar;
        break;
    case MSR_FMASK:
        val = env->fmask;
        break;
    case MSR_FSBASE:
        val = env->segs[R_FS].base;
        break;
    case MSR_GSBASE:
        val = env->segs[R_GS].base;
        break;
    case MSR_KERNELGSBASE:
        val = env->kernelgsbase;
        break;
    case MSR_TSC_AUX:
        val = env->tsc_aux;
        break;
#endif
    /* variable-range MTRRs: base/mask MSR pairs, hence the /2 indexing */
    case MSR_MTRRphysBase(0):
    case MSR_MTRRphysBase(1):
    case MSR_MTRRphysBase(2):
    case MSR_MTRRphysBase(3):
    case MSR_MTRRphysBase(4):
    case MSR_MTRRphysBase(5):
    case MSR_MTRRphysBase(6):
    case MSR_MTRRphysBase(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysBase(0)) / 2].base;
        break;
    case MSR_MTRRphysMask(0):
    case MSR_MTRRphysMask(1):
    case MSR_MTRRphysMask(2):
    case MSR_MTRRphysMask(3):
    case MSR_MTRRphysMask(4):
    case MSR_MTRRphysMask(5):
    case MSR_MTRRphysMask(6):
    case MSR_MTRRphysMask(7):
        val = env->mtrr_var[((uint32_t)ECX - MSR_MTRRphysMask(0)) / 2].mask;
        break;
    case MSR_MTRRfix64K_00000:
        val = env->mtrr_fixed[0];
        break;
    case MSR_MTRRfix16K_80000:
    case MSR_MTRRfix16K_A0000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix16K_80000 + 1];
        break;
    case MSR_MTRRfix4K_C0000:
    case MSR_MTRRfix4K_C8000:
    case MSR_MTRRfix4K_D0000:
    case MSR_MTRRfix4K_D8000:
    case MSR_MTRRfix4K_E0000:
    case MSR_MTRRfix4K_E8000:
    case MSR_MTRRfix4K_F0000:
    case MSR_MTRRfix4K_F8000:
        val = env->mtrr_fixed[(uint32_t)ECX - MSR_MTRRfix4K_C0000 + 3];
        break;
    case MSR_MTRRdefType:
        val = env->mtrr_deftype;
        break;
    case MSR_MTRRcap:
        if (env->cpuid_features & CPUID_MTRR)
            val = MSR_MTRRcap_VCNT | MSR_MTRRcap_FIXRANGE_SUPPORT | MSR_MTRRcap_WC_SUPPORTED;
        else
            /* XXX: exception ? */
            val = 0;
        break;
    case MSR_MCG_CAP:
        val = env->mcg_cap;
        break;
    case MSR_MCG_CTL:
        if (env->mcg_cap & MCG_CTL_P)
            val = env->mcg_ctl;
        else
            val = 0;
        break;
    case MSR_MCG_STATUS:
        val = env->mcg_status;
        break;
    case MSR_IA32_MISC_ENABLE:
        val = env->msr_ia32_misc_enable;
        break;
    default:
        if ((uint32_t)ECX >= MSR_MC0_CTL
            && (uint32_t)ECX < MSR_MC0_CTL + (4 * env->mcg_cap & 0xff)) {
            uint32_t offset = (uint32_t)ECX - MSR_MC0_CTL;
            val = env->mce_banks[offset];
            break;
        }
        /* XXX: exception ? */
        val = 0;
        break;
    }
    EAX = (uint32_t)(val);
    EDX = (uint32_t)(val >> 32);
}
3436 #endif
3438 target_ulong helper_lsl(target_ulong selector1)
3440 unsigned int limit;
3441 uint32_t e1, e2, eflags, selector;
3442 int rpl, dpl, cpl, type;
3444 selector = selector1 & 0xffff;
3445 eflags = helper_cc_compute_all(CC_OP);
3446 if ((selector & 0xfffc) == 0)
3447 goto fail;
3448 if (load_segment(&e1, &e2, selector) != 0)
3449 goto fail;
3450 rpl = selector & 3;
3451 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3452 cpl = env->hflags & HF_CPL_MASK;
3453 if (e2 & DESC_S_MASK) {
3454 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3455 /* conforming */
3456 } else {
3457 if (dpl < cpl || dpl < rpl)
3458 goto fail;
3460 } else {
3461 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3462 switch(type) {
3463 case 1:
3464 case 2:
3465 case 3:
3466 case 9:
3467 case 11:
3468 break;
3469 default:
3470 goto fail;
3472 if (dpl < cpl || dpl < rpl) {
3473 fail:
3474 CC_SRC = eflags & ~CC_Z;
3475 return 0;
3478 limit = get_seg_limit(e1, e2);
3479 CC_SRC = eflags | CC_Z;
3480 return limit;
3483 target_ulong helper_lar(target_ulong selector1)
3485 uint32_t e1, e2, eflags, selector;
3486 int rpl, dpl, cpl, type;
3488 selector = selector1 & 0xffff;
3489 eflags = helper_cc_compute_all(CC_OP);
3490 if ((selector & 0xfffc) == 0)
3491 goto fail;
3492 if (load_segment(&e1, &e2, selector) != 0)
3493 goto fail;
3494 rpl = selector & 3;
3495 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3496 cpl = env->hflags & HF_CPL_MASK;
3497 if (e2 & DESC_S_MASK) {
3498 if ((e2 & DESC_CS_MASK) && (e2 & DESC_C_MASK)) {
3499 /* conforming */
3500 } else {
3501 if (dpl < cpl || dpl < rpl)
3502 goto fail;
3504 } else {
3505 type = (e2 >> DESC_TYPE_SHIFT) & 0xf;
3506 switch(type) {
3507 case 1:
3508 case 2:
3509 case 3:
3510 case 4:
3511 case 5:
3512 case 9:
3513 case 11:
3514 case 12:
3515 break;
3516 default:
3517 goto fail;
3519 if (dpl < cpl || dpl < rpl) {
3520 fail:
3521 CC_SRC = eflags & ~CC_Z;
3522 return 0;
3525 CC_SRC = eflags | CC_Z;
3526 return e2 & 0x00f0ff00;
3529 void helper_verr(target_ulong selector1)
3531 uint32_t e1, e2, eflags, selector;
3532 int rpl, dpl, cpl;
3534 selector = selector1 & 0xffff;
3535 eflags = helper_cc_compute_all(CC_OP);
3536 if ((selector & 0xfffc) == 0)
3537 goto fail;
3538 if (load_segment(&e1, &e2, selector) != 0)
3539 goto fail;
3540 if (!(e2 & DESC_S_MASK))
3541 goto fail;
3542 rpl = selector & 3;
3543 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3544 cpl = env->hflags & HF_CPL_MASK;
3545 if (e2 & DESC_CS_MASK) {
3546 if (!(e2 & DESC_R_MASK))
3547 goto fail;
3548 if (!(e2 & DESC_C_MASK)) {
3549 if (dpl < cpl || dpl < rpl)
3550 goto fail;
3552 } else {
3553 if (dpl < cpl || dpl < rpl) {
3554 fail:
3555 CC_SRC = eflags & ~CC_Z;
3556 return;
3559 CC_SRC = eflags | CC_Z;
3562 void helper_verw(target_ulong selector1)
3564 uint32_t e1, e2, eflags, selector;
3565 int rpl, dpl, cpl;
3567 selector = selector1 & 0xffff;
3568 eflags = helper_cc_compute_all(CC_OP);
3569 if ((selector & 0xfffc) == 0)
3570 goto fail;
3571 if (load_segment(&e1, &e2, selector) != 0)
3572 goto fail;
3573 if (!(e2 & DESC_S_MASK))
3574 goto fail;
3575 rpl = selector & 3;
3576 dpl = (e2 >> DESC_DPL_SHIFT) & 3;
3577 cpl = env->hflags & HF_CPL_MASK;
3578 if (e2 & DESC_CS_MASK) {
3579 goto fail;
3580 } else {
3581 if (dpl < cpl || dpl < rpl)
3582 goto fail;
3583 if (!(e2 & DESC_W_MASK)) {
3584 fail:
3585 CC_SRC = eflags & ~CC_Z;
3586 return;
3589 CC_SRC = eflags | CC_Z;
3592 /* x87 FPU helpers */
3594 static inline double floatx80_to_double(floatx80 a)
3596 union {
3597 float64 f64;
3598 double d;
3599 } u;
3601 u.f64 = floatx80_to_float64(a, &env->fp_status);
3602 return u.d;
3605 static inline floatx80 double_to_floatx80(double a)
3607 union {
3608 float64 f64;
3609 double d;
3610 } u;
3612 u.d = a;
3613 return float64_to_floatx80(u.f64, &env->fp_status);
3616 static void fpu_set_exception(int mask)
3618 env->fpus |= mask;
3619 if (env->fpus & (~env->fpuc & FPUC_EM))
3620 env->fpus |= FPUS_SE | FPUS_B;
3623 static inline floatx80 helper_fdiv(floatx80 a, floatx80 b)
3625 if (floatx80_is_zero(b)) {
3626 fpu_set_exception(FPUS_ZE);
3628 return floatx80_div(a, b, &env->fp_status);
3631 static void fpu_raise_exception(void)
3633 if (env->cr[0] & CR0_NE_MASK) {
3634 raise_exception(EXCP10_COPR);
3636 #if !defined(CONFIG_USER_ONLY)
3637 else {
3638 cpu_set_ferr(env);
3640 #endif
3643 void helper_flds_FT0(uint32_t val)
3645 union {
3646 float32 f;
3647 uint32_t i;
3648 } u;
3649 u.i = val;
3650 FT0 = float32_to_floatx80(u.f, &env->fp_status);
3653 void helper_fldl_FT0(uint64_t val)
3655 union {
3656 float64 f;
3657 uint64_t i;
3658 } u;
3659 u.i = val;
3660 FT0 = float64_to_floatx80(u.f, &env->fp_status);
3663 void helper_fildl_FT0(int32_t val)
3665 FT0 = int32_to_floatx80(val, &env->fp_status);
3668 void helper_flds_ST0(uint32_t val)
3670 int new_fpstt;
3671 union {
3672 float32 f;
3673 uint32_t i;
3674 } u;
3675 new_fpstt = (env->fpstt - 1) & 7;
3676 u.i = val;
3677 env->fpregs[new_fpstt].d = float32_to_floatx80(u.f, &env->fp_status);
3678 env->fpstt = new_fpstt;
3679 env->fptags[new_fpstt] = 0; /* validate stack entry */
3682 void helper_fldl_ST0(uint64_t val)
3684 int new_fpstt;
3685 union {
3686 float64 f;
3687 uint64_t i;
3688 } u;
3689 new_fpstt = (env->fpstt - 1) & 7;
3690 u.i = val;
3691 env->fpregs[new_fpstt].d = float64_to_floatx80(u.f, &env->fp_status);
3692 env->fpstt = new_fpstt;
3693 env->fptags[new_fpstt] = 0; /* validate stack entry */
3696 void helper_fildl_ST0(int32_t val)
3698 int new_fpstt;
3699 new_fpstt = (env->fpstt - 1) & 7;
3700 env->fpregs[new_fpstt].d = int32_to_floatx80(val, &env->fp_status);
3701 env->fpstt = new_fpstt;
3702 env->fptags[new_fpstt] = 0; /* validate stack entry */
3705 void helper_fildll_ST0(int64_t val)
3707 int new_fpstt;
3708 new_fpstt = (env->fpstt - 1) & 7;
3709 env->fpregs[new_fpstt].d = int64_to_floatx80(val, &env->fp_status);
3710 env->fpstt = new_fpstt;
3711 env->fptags[new_fpstt] = 0; /* validate stack entry */
3714 uint32_t helper_fsts_ST0(void)
3716 union {
3717 float32 f;
3718 uint32_t i;
3719 } u;
3720 u.f = floatx80_to_float32(ST0, &env->fp_status);
3721 return u.i;
3724 uint64_t helper_fstl_ST0(void)
3726 union {
3727 float64 f;
3728 uint64_t i;
3729 } u;
3730 u.f = floatx80_to_float64(ST0, &env->fp_status);
3731 return u.i;
3734 int32_t helper_fist_ST0(void)
3736 int32_t val;
3737 val = floatx80_to_int32(ST0, &env->fp_status);
3738 if (val != (int16_t)val)
3739 val = -32768;
3740 return val;
3743 int32_t helper_fistl_ST0(void)
3745 int32_t val;
3746 val = floatx80_to_int32(ST0, &env->fp_status);
3747 return val;
3750 int64_t helper_fistll_ST0(void)
3752 int64_t val;
3753 val = floatx80_to_int64(ST0, &env->fp_status);
3754 return val;
3757 int32_t helper_fistt_ST0(void)
3759 int32_t val;
3760 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3761 if (val != (int16_t)val)
3762 val = -32768;
3763 return val;
3766 int32_t helper_fisttl_ST0(void)
3768 int32_t val;
3769 val = floatx80_to_int32_round_to_zero(ST0, &env->fp_status);
3770 return val;
3773 int64_t helper_fisttll_ST0(void)
3775 int64_t val;
3776 val = floatx80_to_int64_round_to_zero(ST0, &env->fp_status);
3777 return val;
3780 void helper_fldt_ST0(target_ulong ptr)
3782 int new_fpstt;
3783 new_fpstt = (env->fpstt - 1) & 7;
3784 env->fpregs[new_fpstt].d = helper_fldt(ptr);
3785 env->fpstt = new_fpstt;
3786 env->fptags[new_fpstt] = 0; /* validate stack entry */
3789 void helper_fstt_ST0(target_ulong ptr)
3791 helper_fstt(ST0, ptr);
/* Exported wrapper around the internal stack-push primitive. */
void helper_fpush(void)
{
    fpush();
}
/* Exported wrapper around the internal stack-pop primitive. */
void helper_fpop(void)
{
    fpop();
}
3804 void helper_fdecstp(void)
3806 env->fpstt = (env->fpstt - 1) & 7;
3807 env->fpus &= (~0x4700);
3810 void helper_fincstp(void)
3812 env->fpstt = (env->fpstt + 1) & 7;
3813 env->fpus &= (~0x4700);
3816 /* FPU move */
3818 void helper_ffree_STN(int st_index)
3820 env->fptags[(env->fpstt + st_index) & 7] = 1;
3823 void helper_fmov_ST0_FT0(void)
3825 ST0 = FT0;
3828 void helper_fmov_FT0_STN(int st_index)
3830 FT0 = ST(st_index);
3833 void helper_fmov_ST0_STN(int st_index)
3835 ST0 = ST(st_index);
3838 void helper_fmov_STN_ST0(int st_index)
3840 ST(st_index) = ST0;
3843 void helper_fxchg_ST0_STN(int st_index)
3845 floatx80 tmp;
3846 tmp = ST(st_index);
3847 ST(st_index) = ST0;
3848 ST0 = tmp;
3851 /* FPU operations */
3853 static const int fcom_ccval[4] = {0x0100, 0x4000, 0x0000, 0x4500};
3855 void helper_fcom_ST0_FT0(void)
3857 int ret;
3859 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3860 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret + 1];
3863 void helper_fucom_ST0_FT0(void)
3865 int ret;
3867 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3868 env->fpus = (env->fpus & ~0x4500) | fcom_ccval[ret+ 1];
3871 static const int fcomi_ccval[4] = {CC_C, CC_Z, 0, CC_Z | CC_P | CC_C};
3873 void helper_fcomi_ST0_FT0(void)
3875 int eflags;
3876 int ret;
3878 ret = floatx80_compare(ST0, FT0, &env->fp_status);
3879 eflags = helper_cc_compute_all(CC_OP);
3880 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3881 CC_SRC = eflags;
3884 void helper_fucomi_ST0_FT0(void)
3886 int eflags;
3887 int ret;
3889 ret = floatx80_compare_quiet(ST0, FT0, &env->fp_status);
3890 eflags = helper_cc_compute_all(CC_OP);
3891 eflags = (eflags & ~(CC_Z | CC_P | CC_C)) | fcomi_ccval[ret + 1];
3892 CC_SRC = eflags;
3895 void helper_fadd_ST0_FT0(void)
3897 ST0 = floatx80_add(ST0, FT0, &env->fp_status);
3900 void helper_fmul_ST0_FT0(void)
3902 ST0 = floatx80_mul(ST0, FT0, &env->fp_status);
3905 void helper_fsub_ST0_FT0(void)
3907 ST0 = floatx80_sub(ST0, FT0, &env->fp_status);
3910 void helper_fsubr_ST0_FT0(void)
3912 ST0 = floatx80_sub(FT0, ST0, &env->fp_status);
3915 void helper_fdiv_ST0_FT0(void)
3917 ST0 = helper_fdiv(ST0, FT0);
3920 void helper_fdivr_ST0_FT0(void)
3922 ST0 = helper_fdiv(FT0, ST0);
3925 /* fp operations between STN and ST0 */
3927 void helper_fadd_STN_ST0(int st_index)
3929 ST(st_index) = floatx80_add(ST(st_index), ST0, &env->fp_status);
3932 void helper_fmul_STN_ST0(int st_index)
3934 ST(st_index) = floatx80_mul(ST(st_index), ST0, &env->fp_status);
3937 void helper_fsub_STN_ST0(int st_index)
3939 ST(st_index) = floatx80_sub(ST(st_index), ST0, &env->fp_status);
3942 void helper_fsubr_STN_ST0(int st_index)
3944 ST(st_index) = floatx80_sub(ST0, ST(st_index), &env->fp_status);
3947 void helper_fdiv_STN_ST0(int st_index)
3949 floatx80 *p;
3950 p = &ST(st_index);
3951 *p = helper_fdiv(*p, ST0);
3954 void helper_fdivr_STN_ST0(int st_index)
3956 floatx80 *p;
3957 p = &ST(st_index);
3958 *p = helper_fdiv(ST0, *p);
3961 /* misc FPU operations */
3962 void helper_fchs_ST0(void)
3964 ST0 = floatx80_chs(ST0);
3967 void helper_fabs_ST0(void)
3969 ST0 = floatx80_abs(ST0);
3972 void helper_fld1_ST0(void)
3974 ST0 = floatx80_one;
3977 void helper_fldl2t_ST0(void)
3979 ST0 = floatx80_l2t;
3982 void helper_fldl2e_ST0(void)
3984 ST0 = floatx80_l2e;
3987 void helper_fldpi_ST0(void)
3989 ST0 = floatx80_pi;
3992 void helper_fldlg2_ST0(void)
3994 ST0 = floatx80_lg2;
3997 void helper_fldln2_ST0(void)
3999 ST0 = floatx80_ln2;
4002 void helper_fldz_ST0(void)
4004 ST0 = floatx80_zero;
4007 void helper_fldz_FT0(void)
4009 FT0 = floatx80_zero;
4012 uint32_t helper_fnstsw(void)
4014 return (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4017 uint32_t helper_fnstcw(void)
4019 return env->fpuc;
4022 static void update_fp_status(void)
4024 int rnd_type;
4026 /* set rounding mode */
4027 switch(env->fpuc & FPU_RC_MASK) {
4028 default:
4029 case FPU_RC_NEAR:
4030 rnd_type = float_round_nearest_even;
4031 break;
4032 case FPU_RC_DOWN:
4033 rnd_type = float_round_down;
4034 break;
4035 case FPU_RC_UP:
4036 rnd_type = float_round_up;
4037 break;
4038 case FPU_RC_CHOP:
4039 rnd_type = float_round_to_zero;
4040 break;
4042 set_float_rounding_mode(rnd_type, &env->fp_status);
4043 switch((env->fpuc >> 8) & 3) {
4044 case 0:
4045 rnd_type = 32;
4046 break;
4047 case 2:
4048 rnd_type = 64;
4049 break;
4050 case 3:
4051 default:
4052 rnd_type = 80;
4053 break;
4055 set_floatx80_rounding_precision(rnd_type, &env->fp_status);
4058 void helper_fldcw(uint32_t val)
4060 env->fpuc = val;
4061 update_fp_status();
4064 void helper_fclex(void)
4066 env->fpus &= 0x7f00;
4069 void helper_fwait(void)
4071 if (env->fpus & FPUS_SE)
4072 fpu_raise_exception();
4075 void helper_fninit(void)
4077 env->fpus = 0;
4078 env->fpstt = 0;
4079 env->fpuc = 0x37f;
4080 env->fptags[0] = 1;
4081 env->fptags[1] = 1;
4082 env->fptags[2] = 1;
4083 env->fptags[3] = 1;
4084 env->fptags[4] = 1;
4085 env->fptags[5] = 1;
4086 env->fptags[6] = 1;
4087 env->fptags[7] = 1;
4090 /* BCD ops */
4092 void helper_fbld_ST0(target_ulong ptr)
4094 floatx80 tmp;
4095 uint64_t val;
4096 unsigned int v;
4097 int i;
4099 val = 0;
4100 for(i = 8; i >= 0; i--) {
4101 v = ldub(ptr + i);
4102 val = (val * 100) + ((v >> 4) * 10) + (v & 0xf);
4104 tmp = int64_to_floatx80(val, &env->fp_status);
4105 if (ldub(ptr + 9) & 0x80) {
4106 floatx80_chs(tmp);
4108 fpush();
4109 ST0 = tmp;
4112 void helper_fbst_ST0(target_ulong ptr)
4114 int v;
4115 target_ulong mem_ref, mem_end;
4116 int64_t val;
4118 val = floatx80_to_int64(ST0, &env->fp_status);
4119 mem_ref = ptr;
4120 mem_end = mem_ref + 9;
4121 if (val < 0) {
4122 stb(mem_end, 0x80);
4123 val = -val;
4124 } else {
4125 stb(mem_end, 0x00);
4127 while (mem_ref < mem_end) {
4128 if (val == 0)
4129 break;
4130 v = val % 100;
4131 val = val / 100;
4132 v = ((v / 10) << 4) | (v % 10);
4133 stb(mem_ref++, v);
4135 while (mem_ref < mem_end) {
4136 stb(mem_ref++, 0);
4140 void helper_f2xm1(void)
4142 double val = floatx80_to_double(ST0);
4143 val = pow(2.0, val) - 1.0;
4144 ST0 = double_to_floatx80(val);
4147 void helper_fyl2x(void)
4149 double fptemp = floatx80_to_double(ST0);
4151 if (fptemp>0.0){
4152 fptemp = log(fptemp)/log(2.0); /* log2(ST) */
4153 fptemp *= floatx80_to_double(ST1);
4154 ST1 = double_to_floatx80(fptemp);
4155 fpop();
4156 } else {
4157 env->fpus &= (~0x4700);
4158 env->fpus |= 0x400;
4162 void helper_fptan(void)
4164 double fptemp = floatx80_to_double(ST0);
4166 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4167 env->fpus |= 0x400;
4168 } else {
4169 fptemp = tan(fptemp);
4170 ST0 = double_to_floatx80(fptemp);
4171 fpush();
4172 ST0 = floatx80_one;
4173 env->fpus &= (~0x400); /* C2 <-- 0 */
4174 /* the above code is for |arg| < 2**52 only */
4178 void helper_fpatan(void)
4180 double fptemp, fpsrcop;
4182 fpsrcop = floatx80_to_double(ST1);
4183 fptemp = floatx80_to_double(ST0);
4184 ST1 = double_to_floatx80(atan2(fpsrcop, fptemp));
4185 fpop();
4188 void helper_fxtract(void)
4190 CPU_LDoubleU temp;
4192 temp.d = ST0;
4194 if (floatx80_is_zero(ST0)) {
4195 /* Easy way to generate -inf and raising division by 0 exception */
4196 ST0 = floatx80_div(floatx80_chs(floatx80_one), floatx80_zero, &env->fp_status);
4197 fpush();
4198 ST0 = temp.d;
4199 } else {
4200 int expdif;
4202 expdif = EXPD(temp) - EXPBIAS;
4203 /*DP exponent bias*/
4204 ST0 = int32_to_floatx80(expdif, &env->fp_status);
4205 fpush();
4206 BIASEXPONENT(temp);
4207 ST0 = temp.d;
4211 void helper_fprem1(void)
4213 double st0, st1, dblq, fpsrcop, fptemp;
4214 CPU_LDoubleU fpsrcop1, fptemp1;
4215 int expdif;
4216 signed long long int q;
4218 st0 = floatx80_to_double(ST0);
4219 st1 = floatx80_to_double(ST1);
4221 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4222 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4223 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4224 return;
4227 fpsrcop = st0;
4228 fptemp = st1;
4229 fpsrcop1.d = ST0;
4230 fptemp1.d = ST1;
4231 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4233 if (expdif < 0) {
4234 /* optimisation? taken from the AMD docs */
4235 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4236 /* ST0 is unchanged */
4237 return;
4240 if (expdif < 53) {
4241 dblq = fpsrcop / fptemp;
4242 /* round dblq towards nearest integer */
4243 dblq = rint(dblq);
4244 st0 = fpsrcop - fptemp * dblq;
4246 /* convert dblq to q by truncating towards zero */
4247 if (dblq < 0.0)
4248 q = (signed long long int)(-dblq);
4249 else
4250 q = (signed long long int)dblq;
4252 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4253 /* (C0,C3,C1) <-- (q2,q1,q0) */
4254 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4255 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4256 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4257 } else {
4258 env->fpus |= 0x400; /* C2 <-- 1 */
4259 fptemp = pow(2.0, expdif - 50);
4260 fpsrcop = (st0 / st1) / fptemp;
4261 /* fpsrcop = integer obtained by chopping */
4262 fpsrcop = (fpsrcop < 0.0) ?
4263 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4264 st0 -= (st1 * fpsrcop * fptemp);
4266 ST0 = double_to_floatx80(st0);
4269 void helper_fprem(void)
4271 double st0, st1, dblq, fpsrcop, fptemp;
4272 CPU_LDoubleU fpsrcop1, fptemp1;
4273 int expdif;
4274 signed long long int q;
4276 st0 = floatx80_to_double(ST0);
4277 st1 = floatx80_to_double(ST1);
4279 if (isinf(st0) || isnan(st0) || isnan(st1) || (st1 == 0.0)) {
4280 ST0 = double_to_floatx80(0.0 / 0.0); /* NaN */
4281 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4282 return;
4285 fpsrcop = st0;
4286 fptemp = st1;
4287 fpsrcop1.d = ST0;
4288 fptemp1.d = ST1;
4289 expdif = EXPD(fpsrcop1) - EXPD(fptemp1);
4291 if (expdif < 0) {
4292 /* optimisation? taken from the AMD docs */
4293 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4294 /* ST0 is unchanged */
4295 return;
4298 if ( expdif < 53 ) {
4299 dblq = fpsrcop/*ST0*/ / fptemp/*ST1*/;
4300 /* round dblq towards zero */
4301 dblq = (dblq < 0.0) ? ceil(dblq) : floor(dblq);
4302 st0 = fpsrcop/*ST0*/ - fptemp * dblq;
4304 /* convert dblq to q by truncating towards zero */
4305 if (dblq < 0.0)
4306 q = (signed long long int)(-dblq);
4307 else
4308 q = (signed long long int)dblq;
4310 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4311 /* (C0,C3,C1) <-- (q2,q1,q0) */
4312 env->fpus |= (q & 0x4) << (8 - 2); /* (C0) <-- q2 */
4313 env->fpus |= (q & 0x2) << (14 - 1); /* (C3) <-- q1 */
4314 env->fpus |= (q & 0x1) << (9 - 0); /* (C1) <-- q0 */
4315 } else {
4316 int N = 32 + (expdif % 32); /* as per AMD docs */
4317 env->fpus |= 0x400; /* C2 <-- 1 */
4318 fptemp = pow(2.0, (double)(expdif - N));
4319 fpsrcop = (st0 / st1) / fptemp;
4320 /* fpsrcop = integer obtained by chopping */
4321 fpsrcop = (fpsrcop < 0.0) ?
4322 -(floor(fabs(fpsrcop))) : floor(fpsrcop);
4323 st0 -= (st1 * fpsrcop * fptemp);
4325 ST0 = double_to_floatx80(st0);
4328 void helper_fyl2xp1(void)
4330 double fptemp = floatx80_to_double(ST0);
4332 if ((fptemp+1.0)>0.0) {
4333 fptemp = log(fptemp+1.0) / log(2.0); /* log2(ST+1.0) */
4334 fptemp *= floatx80_to_double(ST1);
4335 ST1 = double_to_floatx80(fptemp);
4336 fpop();
4337 } else {
4338 env->fpus &= (~0x4700);
4339 env->fpus |= 0x400;
4343 void helper_fsqrt(void)
4345 if (floatx80_is_neg(ST0)) {
4346 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4347 env->fpus |= 0x400;
4349 ST0 = floatx80_sqrt(ST0, &env->fp_status);
4352 void helper_fsincos(void)
4354 double fptemp = floatx80_to_double(ST0);
4356 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4357 env->fpus |= 0x400;
4358 } else {
4359 ST0 = double_to_floatx80(sin(fptemp));
4360 fpush();
4361 ST0 = double_to_floatx80(cos(fptemp));
4362 env->fpus &= (~0x400); /* C2 <-- 0 */
4363 /* the above code is for |arg| < 2**63 only */
4367 void helper_frndint(void)
4369 ST0 = floatx80_round_to_int(ST0, &env->fp_status);
4372 void helper_fscale(void)
4374 if (floatx80_is_any_nan(ST1)) {
4375 ST0 = ST1;
4376 } else {
4377 int n = floatx80_to_int32_round_to_zero(ST1, &env->fp_status);
4378 ST0 = floatx80_scalbn(ST0, n, &env->fp_status);
4382 void helper_fsin(void)
4384 double fptemp = floatx80_to_double(ST0);
4386 if ((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4387 env->fpus |= 0x400;
4388 } else {
4389 ST0 = double_to_floatx80(sin(fptemp));
4390 env->fpus &= (~0x400); /* C2 <-- 0 */
4391 /* the above code is for |arg| < 2**53 only */
4395 void helper_fcos(void)
4397 double fptemp = floatx80_to_double(ST0);
4399 if((fptemp > MAXTAN)||(fptemp < -MAXTAN)) {
4400 env->fpus |= 0x400;
4401 } else {
4402 ST0 = double_to_floatx80(cos(fptemp));
4403 env->fpus &= (~0x400); /* C2 <-- 0 */
4404 /* the above code is for |arg5 < 2**63 only */
4408 void helper_fxam_ST0(void)
4410 CPU_LDoubleU temp;
4411 int expdif;
4413 temp.d = ST0;
4415 env->fpus &= (~0x4700); /* (C3,C2,C1,C0) <-- 0000 */
4416 if (SIGND(temp))
4417 env->fpus |= 0x200; /* C1 <-- 1 */
4419 /* XXX: test fptags too */
4420 expdif = EXPD(temp);
4421 if (expdif == MAXEXPD) {
4422 if (MANTD(temp) == 0x8000000000000000ULL)
4423 env->fpus |= 0x500 /*Infinity*/;
4424 else
4425 env->fpus |= 0x100 /*NaN*/;
4426 } else if (expdif == 0) {
4427 if (MANTD(temp) == 0)
4428 env->fpus |= 0x4000 /*Zero*/;
4429 else
4430 env->fpus |= 0x4400 /*Denormal*/;
4431 } else {
4432 env->fpus |= 0x400;
4436 void helper_fstenv(target_ulong ptr, int data32)
4438 int fpus, fptag, exp, i;
4439 uint64_t mant;
4440 CPU_LDoubleU tmp;
4442 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4443 fptag = 0;
4444 for (i=7; i>=0; i--) {
4445 fptag <<= 2;
4446 if (env->fptags[i]) {
4447 fptag |= 3;
4448 } else {
4449 tmp.d = env->fpregs[i].d;
4450 exp = EXPD(tmp);
4451 mant = MANTD(tmp);
4452 if (exp == 0 && mant == 0) {
4453 /* zero */
4454 fptag |= 1;
4455 } else if (exp == 0 || exp == MAXEXPD
4456 || (mant & (1LL << 63)) == 0
4458 /* NaNs, infinity, denormal */
4459 fptag |= 2;
4463 if (data32) {
4464 /* 32 bit */
4465 stl(ptr, env->fpuc);
4466 stl(ptr + 4, fpus);
4467 stl(ptr + 8, fptag);
4468 stl(ptr + 12, 0); /* fpip */
4469 stl(ptr + 16, 0); /* fpcs */
4470 stl(ptr + 20, 0); /* fpoo */
4471 stl(ptr + 24, 0); /* fpos */
4472 } else {
4473 /* 16 bit */
4474 stw(ptr, env->fpuc);
4475 stw(ptr + 2, fpus);
4476 stw(ptr + 4, fptag);
4477 stw(ptr + 6, 0);
4478 stw(ptr + 8, 0);
4479 stw(ptr + 10, 0);
4480 stw(ptr + 12, 0);
4484 void helper_fldenv(target_ulong ptr, int data32)
4486 int i, fpus, fptag;
4488 if (data32) {
4489 env->fpuc = lduw(ptr);
4490 fpus = lduw(ptr + 4);
4491 fptag = lduw(ptr + 8);
4493 else {
4494 env->fpuc = lduw(ptr);
4495 fpus = lduw(ptr + 2);
4496 fptag = lduw(ptr + 4);
4498 env->fpstt = (fpus >> 11) & 7;
4499 env->fpus = fpus & ~0x3800;
4500 for(i = 0;i < 8; i++) {
4501 env->fptags[i] = ((fptag & 3) == 3);
4502 fptag >>= 2;
4506 void helper_fsave(target_ulong ptr, int data32)
4508 floatx80 tmp;
4509 int i;
4511 helper_fstenv(ptr, data32);
4513 ptr += (14 << data32);
4514 for(i = 0;i < 8; i++) {
4515 tmp = ST(i);
4516 helper_fstt(tmp, ptr);
4517 ptr += 10;
4520 /* fninit */
4521 env->fpus = 0;
4522 env->fpstt = 0;
4523 env->fpuc = 0x37f;
4524 env->fptags[0] = 1;
4525 env->fptags[1] = 1;
4526 env->fptags[2] = 1;
4527 env->fptags[3] = 1;
4528 env->fptags[4] = 1;
4529 env->fptags[5] = 1;
4530 env->fptags[6] = 1;
4531 env->fptags[7] = 1;
4534 void helper_frstor(target_ulong ptr, int data32)
4536 floatx80 tmp;
4537 int i;
4539 helper_fldenv(ptr, data32);
4540 ptr += (14 << data32);
4542 for(i = 0;i < 8; i++) {
4543 tmp = helper_fldt(ptr);
4544 ST(i) = tmp;
4545 ptr += 10;
4550 #if defined(CONFIG_USER_ONLY)
4551 void cpu_x86_load_seg(CPUX86State *s, int seg_reg, int selector)
4553 CPUX86State *saved_env;
4555 saved_env = env;
4556 env = s;
4557 if (!(env->cr[0] & CR0_PE_MASK) || (env->eflags & VM_MASK)) {
4558 selector &= 0xffff;
4559 cpu_x86_load_seg_cache(env, seg_reg, selector,
4560 (selector << 4), 0xffff, 0);
4561 } else {
4562 helper_load_seg(seg_reg, selector);
4564 env = saved_env;
4567 void cpu_x86_fsave(CPUX86State *s, target_ulong ptr, int data32)
4569 CPUX86State *saved_env;
4571 saved_env = env;
4572 env = s;
4574 helper_fsave(ptr, data32);
4576 env = saved_env;
4579 void cpu_x86_frstor(CPUX86State *s, target_ulong ptr, int data32)
4581 CPUX86State *saved_env;
4583 saved_env = env;
4584 env = s;
4586 helper_frstor(ptr, data32);
4588 env = saved_env;
4590 #endif
4592 void helper_fxsave(target_ulong ptr, int data64)
4594 int fpus, fptag, i, nb_xmm_regs;
4595 floatx80 tmp;
4596 target_ulong addr;
4598 /* The operand must be 16 byte aligned */
4599 if (ptr & 0xf) {
4600 raise_exception(EXCP0D_GPF);
4603 fpus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11;
4604 fptag = 0;
4605 for(i = 0; i < 8; i++) {
4606 fptag |= (env->fptags[i] << i);
4608 stw(ptr, env->fpuc);
4609 stw(ptr + 2, fpus);
4610 stw(ptr + 4, fptag ^ 0xff);
4611 #ifdef TARGET_X86_64
4612 if (data64) {
4613 stq(ptr + 0x08, 0); /* rip */
4614 stq(ptr + 0x10, 0); /* rdp */
4615 } else
4616 #endif
4618 stl(ptr + 0x08, 0); /* eip */
4619 stl(ptr + 0x0c, 0); /* sel */
4620 stl(ptr + 0x10, 0); /* dp */
4621 stl(ptr + 0x14, 0); /* sel */
4624 addr = ptr + 0x20;
4625 for(i = 0;i < 8; i++) {
4626 tmp = ST(i);
4627 helper_fstt(tmp, addr);
4628 addr += 16;
4631 if (env->cr[4] & CR4_OSFXSR_MASK) {
4632 /* XXX: finish it */
4633 stl(ptr + 0x18, env->mxcsr); /* mxcsr */
4634 stl(ptr + 0x1c, 0x0000ffff); /* mxcsr_mask */
4635 if (env->hflags & HF_CS64_MASK)
4636 nb_xmm_regs = 16;
4637 else
4638 nb_xmm_regs = 8;
4639 addr = ptr + 0xa0;
4640 /* Fast FXSAVE leaves out the XMM registers */
4641 if (!(env->efer & MSR_EFER_FFXSR)
4642 || (env->hflags & HF_CPL_MASK)
4643 || !(env->hflags & HF_LMA_MASK)) {
4644 for(i = 0; i < nb_xmm_regs; i++) {
4645 stq(addr, env->xmm_regs[i].XMM_Q(0));
4646 stq(addr + 8, env->xmm_regs[i].XMM_Q(1));
4647 addr += 16;
4653 void helper_fxrstor(target_ulong ptr, int data64)
4655 int i, fpus, fptag, nb_xmm_regs;
4656 floatx80 tmp;
4657 target_ulong addr;
4659 /* The operand must be 16 byte aligned */
4660 if (ptr & 0xf) {
4661 raise_exception(EXCP0D_GPF);
4664 env->fpuc = lduw(ptr);
4665 fpus = lduw(ptr + 2);
4666 fptag = lduw(ptr + 4);
4667 env->fpstt = (fpus >> 11) & 7;
4668 env->fpus = fpus & ~0x3800;
4669 fptag ^= 0xff;
4670 for(i = 0;i < 8; i++) {
4671 env->fptags[i] = ((fptag >> i) & 1);
4674 addr = ptr + 0x20;
4675 for(i = 0;i < 8; i++) {
4676 tmp = helper_fldt(addr);
4677 ST(i) = tmp;
4678 addr += 16;
4681 if (env->cr[4] & CR4_OSFXSR_MASK) {
4682 /* XXX: finish it */
4683 env->mxcsr = ldl(ptr + 0x18);
4684 //ldl(ptr + 0x1c);
4685 if (env->hflags & HF_CS64_MASK)
4686 nb_xmm_regs = 16;
4687 else
4688 nb_xmm_regs = 8;
4689 addr = ptr + 0xa0;
4690 /* Fast FXRESTORE leaves out the XMM registers */
4691 if (!(env->efer & MSR_EFER_FFXSR)
4692 || (env->hflags & HF_CPL_MASK)
4693 || !(env->hflags & HF_LMA_MASK)) {
4694 for(i = 0; i < nb_xmm_regs; i++) {
4695 env->xmm_regs[i].XMM_Q(0) = ldq(addr);
4696 env->xmm_regs[i].XMM_Q(1) = ldq(addr + 8);
4697 addr += 16;
4703 void cpu_get_fp80(uint64_t *pmant, uint16_t *pexp, floatx80 f)
4705 CPU_LDoubleU temp;
4707 temp.d = f;
4708 *pmant = temp.l.lower;
4709 *pexp = temp.l.upper;
4712 floatx80 cpu_set_fp80(uint64_t mant, uint16_t upper)
4714 CPU_LDoubleU temp;
4716 temp.l.upper = upper;
4717 temp.l.lower = mant;
4718 return temp.d;
4721 #ifdef TARGET_X86_64
4723 //#define DEBUG_MULDIV
/* 128-bit add: (*phigh:*plow) += (b:a), propagating the carry out of the
 * low word via the unsigned-wraparound test. */
static void add128(uint64_t *plow, uint64_t *phigh, uint64_t a, uint64_t b)
{
    *plow += a;
    /* carry test */
    if (*plow < a) {
        (*phigh)++;
    }
    *phigh += b;
}
/* 128-bit two's-complement negate: invert both words, then add one. */
static void neg128(uint64_t *plow, uint64_t *phigh)
{
    *plow = ~*plow;
    *phigh = ~*phigh;
    add128(plow, phigh, 1, 0);
}
4741 /* return TRUE if overflow */
4742 static int div64(uint64_t *plow, uint64_t *phigh, uint64_t b)
4744 uint64_t q, r, a1, a0;
4745 int i, qb, ab;
4747 a0 = *plow;
4748 a1 = *phigh;
4749 if (a1 == 0) {
4750 q = a0 / b;
4751 r = a0 % b;
4752 *plow = q;
4753 *phigh = r;
4754 } else {
4755 if (a1 >= b)
4756 return 1;
4757 /* XXX: use a better algorithm */
4758 for(i = 0; i < 64; i++) {
4759 ab = a1 >> 63;
4760 a1 = (a1 << 1) | (a0 >> 63);
4761 if (ab || a1 >= b) {
4762 a1 -= b;
4763 qb = 1;
4764 } else {
4765 qb = 0;
4767 a0 = (a0 << 1) | qb;
4769 #if defined(DEBUG_MULDIV)
4770 printf("div: 0x%016" PRIx64 "%016" PRIx64 " / 0x%016" PRIx64 ": q=0x%016" PRIx64 " r=0x%016" PRIx64 "\n",
4771 *phigh, *plow, b, a0, a1);
4772 #endif
4773 *plow = a0;
4774 *phigh = a1;
4776 return 0;
/* 128/64 signed division built on div64: take absolute values, divide,
 * then restore the signs of quotient and remainder. Returns 1 on
 * overflow (including a quotient outside the signed 64-bit range). */
static int idiv64(uint64_t *plow, uint64_t *phigh, int64_t b)
{
    int sa, sb;

    sa = ((int64_t)*phigh < 0);
    if (sa) {
        neg128(plow, phigh);
    }
    sb = (b < 0);
    if (sb) {
        b = -b;
    }
    if (div64(plow, phigh, b) != 0) {
        return 1;
    }
    if (sa ^ sb) {
        if (*plow > (1ULL << 63)) {
            return 1;
        }
        *plow = -*plow;
    } else {
        if (*plow >= (1ULL << 63)) {
            return 1;
        }
    }
    if (sa) {
        *phigh = -*phigh; /* remainder takes the dividend's sign */
    }
    return 0;
}
4804 void helper_mulq_EAX_T0(target_ulong t0)
4806 uint64_t r0, r1;
4808 mulu64(&r0, &r1, EAX, t0);
4809 EAX = r0;
4810 EDX = r1;
4811 CC_DST = r0;
4812 CC_SRC = r1;
4815 void helper_imulq_EAX_T0(target_ulong t0)
4817 uint64_t r0, r1;
4819 muls64(&r0, &r1, EAX, t0);
4820 EAX = r0;
4821 EDX = r1;
4822 CC_DST = r0;
4823 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4826 target_ulong helper_imulq_T0_T1(target_ulong t0, target_ulong t1)
4828 uint64_t r0, r1;
4830 muls64(&r0, &r1, t0, t1);
4831 CC_DST = r0;
4832 CC_SRC = ((int64_t)r1 != ((int64_t)r0 >> 63));
4833 return r0;
4836 void helper_divq_EAX(target_ulong t0)
4838 uint64_t r0, r1;
4839 if (t0 == 0) {
4840 raise_exception(EXCP00_DIVZ);
4842 r0 = EAX;
4843 r1 = EDX;
4844 if (div64(&r0, &r1, t0))
4845 raise_exception(EXCP00_DIVZ);
4846 EAX = r0;
4847 EDX = r1;
4850 void helper_idivq_EAX(target_ulong t0)
4852 uint64_t r0, r1;
4853 if (t0 == 0) {
4854 raise_exception(EXCP00_DIVZ);
4856 r0 = EAX;
4857 r1 = EDX;
4858 if (idiv64(&r0, &r1, t0))
4859 raise_exception(EXCP00_DIVZ);
4860 EAX = r0;
4861 EDX = r1;
4863 #endif
4865 static void do_hlt(void)
4867 env->hflags &= ~HF_INHIBIT_IRQ_MASK; /* needed if sti is just before */
4868 env->halted = 1;
4869 env->exception_index = EXCP_HLT;
4870 cpu_loop_exit(env);
4873 void helper_hlt(int next_eip_addend)
4875 helper_svm_check_intercept_param(SVM_EXIT_HLT, 0);
4876 EIP += next_eip_addend;
4878 do_hlt();
4881 void helper_monitor(target_ulong ptr)
4883 if ((uint32_t)ECX != 0)
4884 raise_exception(EXCP0D_GPF);
4885 /* XXX: store address ? */
4886 helper_svm_check_intercept_param(SVM_EXIT_MONITOR, 0);
4889 void helper_mwait(int next_eip_addend)
4891 if ((uint32_t)ECX != 0)
4892 raise_exception(EXCP0D_GPF);
4893 helper_svm_check_intercept_param(SVM_EXIT_MWAIT, 0);
4894 EIP += next_eip_addend;
4896 /* XXX: not complete but not completely erroneous */
4897 if (env->cpu_index != 0 || env->next_cpu != NULL) {
4898 /* more than one CPU: do not sleep because another CPU may
4899 wake this one */
4900 } else {
4901 do_hlt();
4905 void helper_debug(void)
4907 env->exception_index = EXCP_DEBUG;
4908 cpu_loop_exit(env);
4911 void helper_reset_rf(void)
4913 env->eflags &= ~RF_MASK;
/* Raise a software interrupt (is_int = 1) at intno. */
void helper_raise_interrupt(int intno, int next_eip_addend)
{
    raise_interrupt(intno, 1, 0, next_eip_addend);
}
/* Raise a CPU exception with the given vector. */
void helper_raise_exception(int exception_index)
{
    raise_exception(exception_index);
}
4926 void helper_cli(void)
4928 env->eflags &= ~IF_MASK;
4931 void helper_sti(void)
4933 env->eflags |= IF_MASK;
4936 #if 0
4937 /* vm86plus instructions */
4938 void helper_cli_vm(void)
4940 env->eflags &= ~VIF_MASK;
4943 void helper_sti_vm(void)
4945 env->eflags |= VIF_MASK;
4946 if (env->eflags & VIP_MASK) {
4947 raise_exception(EXCP0D_GPF);
4950 #endif
4952 void helper_set_inhibit_irq(void)
4954 env->hflags |= HF_INHIBIT_IRQ_MASK;
4957 void helper_reset_inhibit_irq(void)
4959 env->hflags &= ~HF_INHIBIT_IRQ_MASK;
4962 void helper_boundw(target_ulong a0, int v)
4964 int low, high;
4965 low = ldsw(a0);
4966 high = ldsw(a0 + 2);
4967 v = (int16_t)v;
4968 if (v < low || v > high) {
4969 raise_exception(EXCP05_BOUND);
4973 void helper_boundl(target_ulong a0, int v)
4975 int low, high;
4976 low = ldl(a0);
4977 high = ldl(a0 + 4);
4978 if (v < low || v > high) {
4979 raise_exception(EXCP05_BOUND);
4983 #if !defined(CONFIG_USER_ONLY)
4985 #define MMUSUFFIX _mmu
4987 #define SHIFT 0
4988 #include "softmmu_template.h"
4990 #define SHIFT 1
4991 #include "softmmu_template.h"
4993 #define SHIFT 2
4994 #include "softmmu_template.h"
4996 #define SHIFT 3
4997 #include "softmmu_template.h"
4999 #endif
#if !defined(CONFIG_USER_ONLY)
/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill(CPUX86State *env1, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    TranslationBlock *tb;
    int ret;
    CPUX86State *saved_env;

    /* The helpers below operate on the global env (AREG0), so switch
       it to the faulting CPU's state and restore it afterwards. */
    saved_env = env;
    env = env1;

    ret = cpu_x86_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (ret) {
        if (retaddr) {
            /* now we have a real cpu fault */
            tb = tb_find_pc(retaddr);
            if (tb) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, retaddr);
            }
        }
        /* Longjmps back into the cpu loop; env is the faulting CPU
           here, which is what the exception path expects. */
        raise_exception_err(env->exception_index, env->error_code);
    }
    env = saved_env;
}
#endif
/* Secure Virtual Machine helpers */

#if defined(CONFIG_USER_ONLY)

/* In user-mode emulation there is no system state to virtualize, so
   all SVM helpers are empty stubs. */
void helper_vmrun(int aflag, int next_eip_addend)
{
}
void helper_vmmcall(void)
{
}
void helper_vmload(int aflag)
{
}
void helper_vmsave(int aflag)
{
}
void helper_stgi(void)
{
}
void helper_clgi(void)
{
}
void helper_skinit(void)
{
}
void helper_invlpga(int aflag)
{
}
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
}
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
}
void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
}
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
}
#else
/* Store segment cache @sc into the vmcb segment descriptor at guest
   physical address @addr, repacking the CPU's expanded flags into the
   VMCB 12-bit "attrib" format. */
static inline void svm_save_seg(target_phys_addr_t addr,
                                const SegmentCache *sc)
{
    stw_phys(addr + offsetof(struct vmcb_seg, selector),
             sc->selector);
    stq_phys(addr + offsetof(struct vmcb_seg, base),
             sc->base);
    stl_phys(addr + offsetof(struct vmcb_seg, limit),
             sc->limit);
    /* attrib = flags[15:8] in the low byte, flags[23:20] in bits 11:8 */
    stw_phys(addr + offsetof(struct vmcb_seg, attrib),
             ((sc->flags >> 8) & 0xff) | ((sc->flags >> 12) & 0x0f00));
}
/* Load a segment descriptor from the vmcb at guest physical address
   @addr into @sc, expanding the packed VMCB "attrib" field back into
   the CPU's flags layout (inverse of svm_save_seg). */
static inline void svm_load_seg(target_phys_addr_t addr, SegmentCache *sc)
{
    unsigned int flags;

    sc->selector = lduw_phys(addr + offsetof(struct vmcb_seg, selector));
    sc->base = ldq_phys(addr + offsetof(struct vmcb_seg, base));
    sc->limit = ldl_phys(addr + offsetof(struct vmcb_seg, limit));
    flags = lduw_phys(addr + offsetof(struct vmcb_seg, attrib));
    sc->flags = ((flags & 0xff) << 8) | ((flags & 0x0f00) << 12);
}
5102 static inline void svm_load_seg_cache(target_phys_addr_t addr,
5103 CPUX86State *env, int seg_reg)
5105 SegmentCache sc1, *sc = &sc1;
5106 svm_load_seg(addr, sc);
5107 cpu_x86_load_seg_cache(env, seg_reg, sc->selector,
5108 sc->base, sc->limit, sc->flags);
/* VMRUN: world switch into the guest.
   Saves the host state into the hsave page, loads the guest state and
   intercept bitmaps from the vmcb whose physical address is in
   [RE]AX, then optionally injects the event pending in
   control.event_inj.  The statement order mirrors the architectural
   VMRUN sequence and must be preserved. */
void helper_vmrun(int aflag, int next_eip_addend)
{
    target_ulong addr;
    uint32_t event_inj;
    uint32_t int_ctl;

    helper_svm_check_intercept_param(SVM_EXIT_VMRUN, 0);

    /* vmcb address comes from RAX in 64-bit mode, EAX otherwise */
    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmrun! " TARGET_FMT_lx "\n", addr);

    env->vm_vmcb = addr;

    /* save the current CPU state in the hsave page */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4), env->cr[4]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7), env->dr[7]);

    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags), compute_eflags());

    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_hsave + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    /* host RIP resumes just after the VMRUN instruction */
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip),
             EIP + next_eip_addend);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax), EAX);

    /* load the interception bitmaps so we do not need to access the
       vmcb in svm mode */
    env->intercept = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept));
    env->intercept_cr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_read));
    env->intercept_cr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_cr_write));
    env->intercept_dr_read = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_read));
    env->intercept_dr_write = lduw_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_dr_write));
    env->intercept_exceptions = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.intercept_exceptions));

    /* enable intercepts */
    env->hflags |= HF_SVMI_MASK;

    env->tsc_offset = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.tsc_offset));

    env->gdt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit));

    /* clear exit_info_2 so we behave like the real hardware */
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2), 0);

    cpu_x86_update_cr0(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0)));
    cpu_x86_update_cr4(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3)));
    env->cr[2] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2));
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    if (int_ctl & V_INTR_MASKING_MASK) {
        /* virtual interrupt masking: remember the host IF in HIF */
        env->v_tpr = int_ctl & V_TPR_MASK;
        env->hflags2 |= HF2_VINTR_MASK;
        if (env->eflags & IF_MASK)
            env->hflags2 |= HF2_HIF_MASK;
    }

    cpu_load_efer(env,
                  ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip));
    env->eip = EIP;
    ESP = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax));
    env->dr[7] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7));
    env->dr[6] = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6));
    cpu_x86_set_cpl(env, ldub_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl)));

    /* FIXME: guest state consistency checks */

    switch(ldub_phys(env->vm_vmcb + offsetof(struct vmcb, control.tlb_ctl))) {
    case TLB_CONTROL_DO_NOTHING:
        break;
    case TLB_CONTROL_FLUSH_ALL_ASID:
        /* FIXME: this is not 100% correct but should work for now */
        tlb_flush(env, 1);
        break;
    }

    env->hflags2 |= HF2_GIF_MASK;

    if (int_ctl & V_IRQ_MASK) {
        env->interrupt_request |= CPU_INTERRUPT_VIRQ;
    }

    /* maybe we need to inject an event */
    event_inj = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj));
    if (event_inj & SVM_EVTINJ_VALID) {
        uint8_t vector = event_inj & SVM_EVTINJ_VEC_MASK;
        uint16_t valid_err = event_inj & SVM_EVTINJ_VALID_ERR;
        uint32_t event_inj_err = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err));

        qemu_log_mask(CPU_LOG_TB_IN_ASM, "Injecting(%#hx): ", valid_err);
        /* FIXME: need to implement valid_err */
        switch (event_inj & SVM_EVTINJ_TYPE_MASK) {
        case SVM_EVTINJ_TYPE_INTR:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "INTR");
            /* XXX: is it always correct ? */
            do_interrupt_all(vector, 0, 0, 0, 1);
            break;
        case SVM_EVTINJ_TYPE_NMI:
            env->exception_index = EXCP02_NMI;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "NMI");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_EXEPT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 0;
            env->exception_next_eip = -1;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "EXEPT");
            cpu_loop_exit(env);
            break;
        case SVM_EVTINJ_TYPE_SOFT:
            env->exception_index = vector;
            env->error_code = event_inj_err;
            env->exception_is_int = 1;
            env->exception_next_eip = EIP;
            qemu_log_mask(CPU_LOG_TB_IN_ASM, "SOFT");
            cpu_loop_exit(env);
            break;
        }
        qemu_log_mask(CPU_LOG_TB_IN_ASM, " %#x %#x\n", env->exception_index, env->error_code);
    }
}
/* VMMCALL: if not intercepted by the hypervisor, raises #UD. */
void helper_vmmcall(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_VMMCALL, 0);
    raise_exception(EXCP06_ILLOP);
}
/* VMLOAD: load the additional processor state (FS/GS/TR/LDTR and the
   syscall/sysenter MSRs) from the vmcb whose address is in [RE]AX. */
void helper_vmload(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMLOAD, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmload! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_load_seg_cache(addr + offsetof(struct vmcb, save.fs),
                       env, R_FS);
    svm_load_seg_cache(addr + offsetof(struct vmcb, save.gs),
                       env, R_GS);
    /* TR and LDTR are loaded without reloading the task/descriptor
       tables, hence plain svm_load_seg */
    svm_load_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_load_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    env->kernelgsbase = ldq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base));
    env->lstar = ldq_phys(addr + offsetof(struct vmcb, save.lstar));
    env->cstar = ldq_phys(addr + offsetof(struct vmcb, save.cstar));
    env->fmask = ldq_phys(addr + offsetof(struct vmcb, save.sfmask));
#endif
    env->star = ldq_phys(addr + offsetof(struct vmcb, save.star));
    env->sysenter_cs = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_cs));
    env->sysenter_esp = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_esp));
    env->sysenter_eip = ldq_phys(addr + offsetof(struct vmcb, save.sysenter_eip));
}
/* VMSAVE: store the additional processor state (FS/GS/TR/LDTR and the
   syscall/sysenter MSRs) to the vmcb whose address is in [RE]AX.
   Exact mirror of helper_vmload. */
void helper_vmsave(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_VMSAVE, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmsave! " TARGET_FMT_lx "\nFS: %016" PRIx64 " | " TARGET_FMT_lx "\n",
                addr, ldq_phys(addr + offsetof(struct vmcb, save.fs.base)),
                env->segs[R_FS].base);

    svm_save_seg(addr + offsetof(struct vmcb, save.fs),
                 &env->segs[R_FS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.gs),
                 &env->segs[R_GS]);
    svm_save_seg(addr + offsetof(struct vmcb, save.tr),
                 &env->tr);
    svm_save_seg(addr + offsetof(struct vmcb, save.ldtr),
                 &env->ldt);

#ifdef TARGET_X86_64
    stq_phys(addr + offsetof(struct vmcb, save.kernel_gs_base), env->kernelgsbase);
    stq_phys(addr + offsetof(struct vmcb, save.lstar), env->lstar);
    stq_phys(addr + offsetof(struct vmcb, save.cstar), env->cstar);
    stq_phys(addr + offsetof(struct vmcb, save.sfmask), env->fmask);
#endif
    stq_phys(addr + offsetof(struct vmcb, save.star), env->star);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_cs), env->sysenter_cs);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_esp), env->sysenter_esp);
    stq_phys(addr + offsetof(struct vmcb, save.sysenter_eip), env->sysenter_eip);
}
/* STGI: set the global interrupt flag (GIF). */
void helper_stgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_STGI, 0);
    env->hflags2 |= HF2_GIF_MASK;
}
/* CLGI: clear the global interrupt flag (GIF). */
void helper_clgi(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_CLGI, 0);
    env->hflags2 &= ~HF2_GIF_MASK;
}
/* SKINIT: secure init and jump; unimplemented, raises #UD. */
void helper_skinit(void)
{
    helper_svm_check_intercept_param(SVM_EXIT_SKINIT, 0);
    /* XXX: not implemented */
    raise_exception(EXCP06_ILLOP);
}
/* INVLPGA: invalidate the TLB mapping for the virtual page whose
   address is in [RE]AX.  The ASID in ECX is currently ignored. */
void helper_invlpga(int aflag)
{
    target_ulong addr;
    helper_svm_check_intercept_param(SVM_EXIT_INVLPGA, 0);

    if (aflag == 2)
        addr = EAX;
    else
        addr = (uint32_t)EAX;

    /* XXX: could use the ASID to see if it is needed to do the
       flush */
    tlb_flush_page(env, addr);
}
/* Check whether the hypervisor intercepts event @type and, if so,
   perform a #VMEXIT (which does not return).  @param is forwarded as
   exit_info_1; for SVM_EXIT_MSR it encodes read (0) vs write (1). */
void helper_svm_check_intercept_param(uint32_t type, uint64_t param)
{
    /* fast path: intercepts only matter while in guest (SVMI) mode */
    if (likely(!(env->hflags & HF_SVMI_MASK)))
        return;
    switch(type) {
    case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR0 + 8:
        if (env->intercept_cr_read & (1 << (type - SVM_EXIT_READ_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR0 + 8:
        if (env->intercept_cr_write & (1 << (type - SVM_EXIT_WRITE_CR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR0 + 7:
        if (env->intercept_dr_read & (1 << (type - SVM_EXIT_READ_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR0 + 7:
        if (env->intercept_dr_write & (1 << (type - SVM_EXIT_WRITE_DR0))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 31:
        if (env->intercept_exceptions & (1 << (type - SVM_EXIT_EXCP_BASE))) {
            helper_vmexit(type, param);
        }
        break;
    case SVM_EXIT_MSR:
        if (env->intercept & (1ULL << (SVM_EXIT_MSR - SVM_EXIT_INTR))) {
            /* FIXME: this should be read in at vmrun (faster this way?) */
            uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.msrpm_base_pa));
            uint32_t t0, t1;
            /* map the MSR number to a (byte offset t1, bit offset t0)
               pair inside the MSR permission bitmap; each MSR uses
               two bits (read intercept, write intercept) */
            switch((uint32_t)ECX) {
            case 0 ... 0x1fff:
                t0 = (ECX * 2) % 8;
                t1 = (ECX * 2) / 8;
                break;
            case 0xc0000000 ... 0xc0001fff:
                t0 = (8192 + ECX - 0xc0000000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            case 0xc0010000 ... 0xc0011fff:
                t0 = (16384 + ECX - 0xc0010000) * 2;
                t1 = (t0 / 8);
                t0 %= 8;
                break;
            default:
                /* MSR outside the bitmap: always intercepted */
                helper_vmexit(type, param);
                t0 = 0;
                t1 = 0;
                break;
            }
            /* param selects the read (0) or write (1) intercept bit */
            if (ldub_phys(addr + t1) & ((1 << param) << t0))
                helper_vmexit(type, param);
        }
        break;
    default:
        if (env->intercept & (1ULL << (type - SVM_EXIT_INTR))) {
            helper_vmexit(type, param);
        }
        break;
    }
}
/* External entry point for intercept checks: temporarily switch the
   global env (AREG0) to @env1 before calling the helper. */
void svm_check_intercept(CPUX86State *env1, uint32_t type)
{
    CPUX86State *saved_env;

    saved_env = env;
    env = env1;
    helper_svm_check_intercept_param(type, 0);
    env = saved_env;
}
/* Check the I/O permission bitmap for @port and #VMEXIT if the access
   is intercepted.  @param carries the IOIO exit_info_1 encoding
   (bits 6:4 = access size mask used below). */
void helper_svm_check_io(uint32_t port, uint32_t param,
                         uint32_t next_eip_addend)
{
    if (env->intercept & (1ULL << (SVM_EXIT_IOIO - SVM_EXIT_INTR))) {
        /* FIXME: this should be read in at vmrun (faster this way?) */
        uint64_t addr = ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.iopm_base_pa));
        /* one bitmap bit per port; multi-byte accesses check all
           covered ports at once via the size mask */
        uint16_t mask = (1 << ((param >> 4) & 7)) - 1;
        if(lduw_phys(addr + port / 8) & (mask << (port & 7))) {
            /* next EIP */
            stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2),
                     env->eip + next_eip_addend);
            helper_vmexit(SVM_EXIT_IOIO, param | (port << 16));
        }
    }
}
/* Note: currently only 32 bits of exit_code are used */
/* #VMEXIT: world switch back to the host.
   Saves the guest state into the vmcb, records the exit code/info,
   reloads the host state from the hsave page, and longjmps back to
   the cpu loop.  Does not return. */
void helper_vmexit(uint32_t exit_code, uint64_t exit_info_1)
{
    uint32_t int_ctl;

    qemu_log_mask(CPU_LOG_TB_IN_ASM, "vmexit(%08x, %016" PRIx64 ", %016" PRIx64 ", " TARGET_FMT_lx ")!\n",
                exit_code, exit_info_1,
                ldq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_2)),
                EIP);

    /* propagate the one-instruction interrupt shadow to the vmcb */
    if(env->hflags & HF_INHIBIT_IRQ_MASK) {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), SVM_INTERRUPT_SHADOW_MASK);
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
    } else {
        stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_state), 0);
    }

    /* Save the VM state in the vmcb */
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.es),
                 &env->segs[R_ES]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.cs),
                 &env->segs[R_CS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ss),
                 &env->segs[R_SS]);
    svm_save_seg(env->vm_vmcb + offsetof(struct vmcb, save.ds),
                 &env->segs[R_DS]);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.base), env->gdt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.gdtr.limit), env->gdt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.base), env->idt.base);
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, save.idtr.limit), env->idt.limit);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.efer), env->efer);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr0), env->cr[0]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr2), env->cr[2]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr3), env->cr[3]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.cr4), env->cr[4]);

    /* merge the current virtual interrupt state back into int_ctl */
    int_ctl = ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl));
    int_ctl &= ~(V_TPR_MASK | V_IRQ_MASK);
    int_ctl |= env->v_tpr & V_TPR_MASK;
    if (env->interrupt_request & CPU_INTERRUPT_VIRQ)
        int_ctl |= V_IRQ_MASK;
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.int_ctl), int_ctl);

    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rflags), compute_eflags());
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rip), env->eip);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rsp), ESP);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.rax), EAX);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr7), env->dr[7]);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, save.dr6), env->dr[6]);
    stb_phys(env->vm_vmcb + offsetof(struct vmcb, save.cpl), env->hflags & HF_CPL_MASK);

    /* Reload the host state from vm_hsave */
    env->hflags2 &= ~(HF2_HIF_MASK | HF2_VINTR_MASK);
    env->hflags &= ~HF_SVMI_MASK;
    env->intercept = 0;
    env->intercept_exceptions = 0;
    env->interrupt_request &= ~CPU_INTERRUPT_VIRQ;
    env->tsc_offset = 0;

    env->gdt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.base));
    env->gdt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.gdtr.limit));

    env->idt.base = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.base));
    env->idt.limit = ldl_phys(env->vm_hsave + offsetof(struct vmcb, save.idtr.limit));

    cpu_x86_update_cr0(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr0)) | CR0_PE_MASK);
    cpu_x86_update_cr4(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr4)));
    cpu_x86_update_cr3(env, ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.cr3)));
    /* we need to set the efer after the crs so the hidden flags get
       set properly */
    cpu_load_efer(env,
                  ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.efer)));
    env->eflags = 0;
    load_eflags(ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rflags)),
                ~(CC_O | CC_S | CC_Z | CC_A | CC_P | CC_C | DF_MASK));
    CC_OP = CC_OP_EFLAGS;

    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.es),
                       env, R_ES);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.cs),
                       env, R_CS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ss),
                       env, R_SS);
    svm_load_seg_cache(env->vm_hsave + offsetof(struct vmcb, save.ds),
                       env, R_DS);

    EIP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rip));
    ESP = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rsp));
    EAX = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.rax));

    env->dr[6] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr6));
    env->dr[7] = ldq_phys(env->vm_hsave + offsetof(struct vmcb, save.dr7));

    /* other setups */
    cpu_x86_set_cpl(env, 0);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_code), exit_code);
    stq_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_info_1), exit_info_1);

    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.exit_int_info_err),
             ldl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj_err)));
    stl_phys(env->vm_vmcb + offsetof(struct vmcb, control.event_inj), 0);

    env->hflags2 &= ~HF2_GIF_MASK;
    /* FIXME: Resets the current ASID register to zero (host ASID). */

    /* Clears the V_IRQ and V_INTR_MASKING bits inside the processor. */

    /* Clears the TSC_OFFSET inside the processor. */

    /* If the host is in PAE mode, the processor reloads the host's PDPEs
       from the page table indicated the host's CR3. If the PDPEs contain
       illegal state, the processor causes a shutdown. */

    /* Forces CR0.PE = 1, RFLAGS.VM = 0. */
    env->cr[0] |= CR0_PE_MASK;
    env->eflags &= ~VM_MASK;

    /* Disables all breakpoints in the host DR7 register. */

    /* Checks the reloaded host state for consistency. */

    /* If the host's rIP reloaded by #VMEXIT is outside the limit of the
       host's code segment or non-canonical (in the case of long mode), a
       #GP fault is delivered inside the host.) */

    /* remove any pending exception */
    env->exception_index = -1;
    env->error_code = 0;
    env->old_exception = -1;

    cpu_loop_exit(env);
}

#endif
5628 /* MMX/SSE */
5629 /* XXX: optimize by storing fptt and fptags in the static cpu state */
5631 #define SSE_DAZ 0x0040
5632 #define SSE_RC_MASK 0x6000
5633 #define SSE_RC_NEAR 0x0000
5634 #define SSE_RC_DOWN 0x2000
5635 #define SSE_RC_UP 0x4000
5636 #define SSE_RC_CHOP 0x6000
5637 #define SSE_FZ 0x8000
5639 static void update_sse_status(void)
5641 int rnd_type;
5643 /* set rounding mode */
5644 switch(env->mxcsr & SSE_RC_MASK) {
5645 default:
5646 case SSE_RC_NEAR:
5647 rnd_type = float_round_nearest_even;
5648 break;
5649 case SSE_RC_DOWN:
5650 rnd_type = float_round_down;
5651 break;
5652 case SSE_RC_UP:
5653 rnd_type = float_round_up;
5654 break;
5655 case SSE_RC_CHOP:
5656 rnd_type = float_round_to_zero;
5657 break;
5659 set_float_rounding_mode(rnd_type, &env->sse_status);
5661 /* set denormals are zero */
5662 set_flush_inputs_to_zero((env->mxcsr & SSE_DAZ) ? 1 : 0, &env->sse_status);
5664 /* set flush to zero */
5665 set_flush_to_zero((env->mxcsr & SSE_FZ) ? 1 : 0, &env->fp_status);
/* LDMXCSR: load a new MXCSR value and resynchronize the softfloat
   SSE status with it. */
void helper_ldmxcsr(uint32_t val)
{
    env->mxcsr = val;
    update_sse_status();
}
/* Enter MMX mode: reset the x87 stack top and mark all eight tag
   entries as valid (0), written as two 32-bit stores over the
   byte-sized fptags array. */
void helper_enter_mmx(void)
{
    env->fpstt = 0;
    *(uint32_t *)(env->fptags) = 0;
    *(uint32_t *)(env->fptags + 4) = 0;
}
/* EMMS: leave MMX mode by marking every x87 tag entry empty (1 per
   byte), written as two 32-bit stores. */
void helper_emms(void)
{
    /* set to empty state */
    *(uint32_t *)(env->fptags) = 0x01010101;
    *(uint32_t *)(env->fptags + 4) = 0x01010101;
}
/* XXX: suppress */
/* Copy one 64-bit quantity (MMX register / SSE half) from *s to *d. */
void helper_movq(void *d, void *s)
{
    uint64_t v = *(uint64_t *)s;

    *(uint64_t *)d = v;
}
5694 #define SHIFT 0
5695 #include "ops_sse.h"
5697 #define SHIFT 1
5698 #include "ops_sse.h"
5700 #define SHIFT 0
5701 #include "helper_template.h"
5702 #undef SHIFT
5704 #define SHIFT 1
5705 #include "helper_template.h"
5706 #undef SHIFT
5708 #define SHIFT 2
5709 #include "helper_template.h"
5710 #undef SHIFT
5712 #ifdef TARGET_X86_64
5714 #define SHIFT 3
5715 #include "helper_template.h"
5716 #undef SHIFT
5718 #endif
5720 /* bit operations */
5721 target_ulong helper_bsf(target_ulong t0)
5723 int count;
5724 target_ulong res;
5726 res = t0;
5727 count = 0;
5728 while ((res & 1) == 0) {
5729 count++;
5730 res >>= 1;
5732 return count;
5735 target_ulong helper_lzcnt(target_ulong t0, int wordsize)
5737 int count;
5738 target_ulong res, mask;
5740 if (wordsize > 0 && t0 == 0) {
5741 return wordsize;
5743 res = t0;
5744 count = TARGET_LONG_BITS - 1;
5745 mask = (target_ulong)1 << (TARGET_LONG_BITS - 1);
5746 while ((res & mask) == 0) {
5747 count--;
5748 res <<= 1;
5750 if (wordsize > 0) {
5751 return wordsize - 1 - count;
5753 return count;
/* Bit Scan Reverse: index of the highest set bit (t0 != 0 assumed),
   expressed via helper_lzcnt's wordsize == 0 mode. */
target_ulong helper_bsr(target_ulong t0)
{
    return helper_lzcnt(t0, 0);
}
/* CC_OP_EFLAGS: flags are already materialized in CC_SRC. */
static int compute_all_eflags(void)
{
    return CC_SRC;
}
/* CC_OP_EFLAGS: extract just the carry flag from CC_SRC. */
static int compute_c_eflags(void)
{
    return CC_SRC & CC_C;
}
/* Compute all EFLAGS condition codes (O/S/Z/A/P/C) from the lazy
   flag state according to the last operation @op; dispatches to the
   per-op compute_all_* helpers generated from helper_template.h. */
uint32_t helper_cc_compute_all(int op)
{
    switch (op) {
    default: /* should never happen */ return 0;

    case CC_OP_EFLAGS: return compute_all_eflags();

    case CC_OP_MULB: return compute_all_mulb();
    case CC_OP_MULW: return compute_all_mulw();
    case CC_OP_MULL: return compute_all_mull();

    case CC_OP_ADDB: return compute_all_addb();
    case CC_OP_ADDW: return compute_all_addw();
    case CC_OP_ADDL: return compute_all_addl();

    case CC_OP_ADCB: return compute_all_adcb();
    case CC_OP_ADCW: return compute_all_adcw();
    case CC_OP_ADCL: return compute_all_adcl();

    case CC_OP_SUBB: return compute_all_subb();
    case CC_OP_SUBW: return compute_all_subw();
    case CC_OP_SUBL: return compute_all_subl();

    case CC_OP_SBBB: return compute_all_sbbb();
    case CC_OP_SBBW: return compute_all_sbbw();
    case CC_OP_SBBL: return compute_all_sbbl();

    case CC_OP_LOGICB: return compute_all_logicb();
    case CC_OP_LOGICW: return compute_all_logicw();
    case CC_OP_LOGICL: return compute_all_logicl();

    case CC_OP_INCB: return compute_all_incb();
    case CC_OP_INCW: return compute_all_incw();
    case CC_OP_INCL: return compute_all_incl();

    case CC_OP_DECB: return compute_all_decb();
    case CC_OP_DECW: return compute_all_decw();
    case CC_OP_DECL: return compute_all_decl();

    case CC_OP_SHLB: return compute_all_shlb();
    case CC_OP_SHLW: return compute_all_shlw();
    case CC_OP_SHLL: return compute_all_shll();

    case CC_OP_SARB: return compute_all_sarb();
    case CC_OP_SARW: return compute_all_sarw();
    case CC_OP_SARL: return compute_all_sarl();

#ifdef TARGET_X86_64
    case CC_OP_MULQ: return compute_all_mulq();

    case CC_OP_ADDQ: return compute_all_addq();

    case CC_OP_ADCQ: return compute_all_adcq();

    case CC_OP_SUBQ: return compute_all_subq();

    case CC_OP_SBBQ: return compute_all_sbbq();

    case CC_OP_LOGICQ: return compute_all_logicq();

    case CC_OP_INCQ: return compute_all_incq();

    case CC_OP_DECQ: return compute_all_decq();

    case CC_OP_SHLQ: return compute_all_shlq();

    case CC_OP_SARQ: return compute_all_sarq();
#endif
    }
}
/* External entry point: compute all flags for @env1, temporarily
   switching the global env (AREG0) to it. */
uint32_t cpu_cc_compute_all(CPUX86State *env1, int op)
{
    CPUX86State *saved_env;
    uint32_t ret;

    saved_env = env;
    env = env1;
    ret = helper_cc_compute_all(op);
    env = saved_env;
    return ret;
}
5854 uint32_t helper_cc_compute_c(int op)
5856 switch (op) {
5857 default: /* should never happen */ return 0;
5859 case CC_OP_EFLAGS: return compute_c_eflags();
5861 case CC_OP_MULB: return compute_c_mull();
5862 case CC_OP_MULW: return compute_c_mull();
5863 case CC_OP_MULL: return compute_c_mull();
5865 case CC_OP_ADDB: return compute_c_addb();
5866 case CC_OP_ADDW: return compute_c_addw();
5867 case CC_OP_ADDL: return compute_c_addl();
5869 case CC_OP_ADCB: return compute_c_adcb();
5870 case CC_OP_ADCW: return compute_c_adcw();
5871 case CC_OP_ADCL: return compute_c_adcl();
5873 case CC_OP_SUBB: return compute_c_subb();
5874 case CC_OP_SUBW: return compute_c_subw();
5875 case CC_OP_SUBL: return compute_c_subl();
5877 case CC_OP_SBBB: return compute_c_sbbb();
5878 case CC_OP_SBBW: return compute_c_sbbw();
5879 case CC_OP_SBBL: return compute_c_sbbl();
5881 case CC_OP_LOGICB: return compute_c_logicb();
5882 case CC_OP_LOGICW: return compute_c_logicw();
5883 case CC_OP_LOGICL: return compute_c_logicl();
5885 case CC_OP_INCB: return compute_c_incl();
5886 case CC_OP_INCW: return compute_c_incl();
5887 case CC_OP_INCL: return compute_c_incl();
5889 case CC_OP_DECB: return compute_c_incl();
5890 case CC_OP_DECW: return compute_c_incl();
5891 case CC_OP_DECL: return compute_c_incl();
5893 case CC_OP_SHLB: return compute_c_shlb();
5894 case CC_OP_SHLW: return compute_c_shlw();
5895 case CC_OP_SHLL: return compute_c_shll();
5897 case CC_OP_SARB: return compute_c_sarl();
5898 case CC_OP_SARW: return compute_c_sarl();
5899 case CC_OP_SARL: return compute_c_sarl();
5901 #ifdef TARGET_X86_64
5902 case CC_OP_MULQ: return compute_c_mull();
5904 case CC_OP_ADDQ: return compute_c_addq();
5906 case CC_OP_ADCQ: return compute_c_adcq();
5908 case CC_OP_SUBQ: return compute_c_subq();
5910 case CC_OP_SBBQ: return compute_c_sbbq();
5912 case CC_OP_LOGICQ: return compute_c_logicq();
5914 case CC_OP_INCQ: return compute_c_incl();
5916 case CC_OP_DECQ: return compute_c_incl();
5918 case CC_OP_SHLQ: return compute_c_shlq();
5920 case CC_OP_SARQ: return compute_c_sarl();
5921 #endif