/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(...)  __VA_ARGS__
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5, cpu_tmp6;
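/* Note on condition codes: EFLAGS is evaluated lazily.  Instead of
   computing the flags after every instruction, the translator records the
   last flag-setting operation in cpu_cc_op and its operands/result in
   cpu_cc_src and cpu_cc_dst; the flags are only materialized (through the
   cc_compute helpers below) when an instruction actually consumes them,
   e.g. a Jcc or SETcc.  CC_OP_DYNAMIC means the operation is only known
   at run time and must be read back from the CPU state. */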
#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
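/* A DisasContext is filled in once per translation block from the cached
   CPU flags (CPL, code/stack size, VM86 mode...) and then updated per
   instruction while decoding.  Anything that may invalidate these cached
   values at run time -- a segment register load, for instance -- has to
   end the block by setting is_jmp. */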
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);

/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};
enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
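/* Example: on a little-endian host, AH lives at byte offset 1 of
   regs[R_EAX] (REG_H_OFFSET == 1), while on a big-endian host the same
   byte sits at sizeof(target_ulong) - 2.  These offsets let the byte and
   word helpers below address sub-registers directly inside the
   target_ulong register array. */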
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
        } else {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    case OT_WORD:
        tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case OT_QUAD:
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
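/* In the OT_BYTE case above, registers 4..7 without a REX prefix name the
   legacy high-byte registers AH/CH/DH/BH (bits 8..15 of EAX/ECX/EDX/EBX),
   hence the store at regs[reg - 4] + REG_H_OFFSET.  When a REX prefix is
   active (x86_64_hregs set), 4..7 name SPL/BPL/SIL/DIL instead and use
   the normal low-byte slot. */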
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}

static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case 2:
        tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    default:
    std_case:
        tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}

static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}
#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}

static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}

static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}
#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}
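/* The 'idx' parameter packs two things: the low two bits are the operand
   size (OT_BYTE..OT_QUAD) and the upper bits carry the MMU index, which
   is why callers pass 'ot + s->mem_index'.  (idx >> 2) - 1 recovers the
   TCG memory index expected by the tcg_gen_qemu_* ops. */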
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets.  */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}

static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}

static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}

static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if(s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
            state_saved = 1;
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
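/* Both paths above first commit cc_op and EIP to the CPU state
   (gen_op_set_cc_op/gen_jmp_im): the check helpers may fault -- the TSS
   I/O permission bitmap can deny the access, or the SVM intercept can
   trigger a #VMEXIT -- and exception delivery needs a consistent
   architectural state to unwind to. */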
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}
static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
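/* The shift counts above are the EFLAGS bit positions: CF is bit 0, PF
   bit 2, ZF bit 6, SF bit 7 and OF bit 11.  For example, JCC_LE ORs
   together (SF xor OF) and ZF before masking with 1. */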
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}
/* same method as Valgrind: we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
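/* The expansions above each define one gen_repz_* generator; e.g.
   GEN_REPZ(movs) produces gen_repz_movs().  It emits a single iteration
   of the string insn, decrements ECX, and loops by jumping back to
   cur_eip (the insn's own address) until ECX -- or, for the GEN_REPZ2
   variants scas/cmps, the Z flag -- terminates the loop. */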
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
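/* For OP_ADCL/OP_SBBL above, the resulting flags depend on the incoming
   carry, which is only known at run time.  The code exploits the CC_OP
   enum layout (groups of 4 sizes, with the ADC group 4 entries after ADD
   and SBB 4 after SUB): it shifts the computed carry left by 2 and adds
   it to CC_OP_ADDB/CC_OP_SUBB + ot, so cpu_cc_op selects the plain group
   when the carry was 0 and the with-carry group when it was 1; the
   translator then marks cc_op as CC_OP_DYNAMIC. */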
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
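/* The masked shift count may still be zero at run time, and a zero shift
   must leave EFLAGS untouched; hence the branch around the
   cc_src/cc_dst update above, and the final CC_OP_DYNAMIC, since the
   translator cannot know statically which case was taken. */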
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
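/* Rotates only update CF and OF; SF/ZF/PF keep their previous values.
   That is why the code above materializes the old EFLAGS, clears CC_O
   and CC_C, then ORs in OF (derived from the XOR of the operand before
   and after the rotate, shifted to bit 11) and CF (the bit rotated
   around the end), and finally switches to CC_OP_EFLAGS. */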
static void gen_rot_rm_im(DisasContext *s, int ot, int op1, int op2,
                          int is_right)
{
    int mask;
    int data_bits;
    TCGv t0, t1, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t1, t0);

    op2 &= mask;
    data_bits = 8 << ot;
    if (op2 != 0) {
        int shift = op2 & ((1 << (3 + ot)) - 1);
        if (is_right) {
            tcg_gen_shri_tl(cpu_tmp4, t0, shift);
            tcg_gen_shli_tl(t0, t0, data_bits - shift);
        } else {
            tcg_gen_shli_tl(cpu_tmp4, t0, shift);
            tcg_gen_shri_tl(t0, t0, data_bits - shift);
        }
        tcg_gen_or_tl(t0, t0, cpu_tmp4);
    }

    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    if (op2 != 0) {
        /* update eflags */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);

        gen_compute_eflags(cpu_cc_src);
        tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
        tcg_gen_xor_tl(cpu_tmp0, t1, t0);
        tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
        if (is_right) {
            tcg_gen_shri_tl(t0, t0, data_bits - 1);
        }
        tcg_gen_andi_tl(t0, t0, CC_C);
        tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

        tcg_gen_discard_tl(cpu_cc_dst);
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);
        s->cc_op = CC_OP_EFLAGS;
    }

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(a0);
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate things */
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_ROL:
        gen_rot_rm_im(s1, ot, d, c, 0);
        break;
    case OP_ROR:
        gen_rot_rm_im(s1, ot, d, c, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* XXX: index == 4 is always invalid */
        if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
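/* ModRM recap for the decoder above: the byte is mod(7:6) reg(5:3)
   rm(2:0).  With 32-bit addressing, rm == 4 announces a SIB byte --
   scale(7:6) index(5:3) base(2:0) -- and mod selects the displacement
   size (0, 8 or 32 bits).  For example, modrm 0x44 gives mod=1, rm=4:
   a SIB byte plus an 8-bit displacement follow in the instruction
   stream. */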
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    } else {
        override = R_DS;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
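/* tcg_gen_goto_tb emits a patchable direct jump, and returning
   (long)tb + tb_num from the generated code tells the execution loop
   which of the two possible successors to chain, so later runs skip the
   TB lookup entirely.  Chaining is only safe while staying on the same
   guest page, hence the page check above. */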
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
2340 static void gen_setcc(DisasContext *s, int b)
2342 int inv, jcc_op, l1;
2343 TCGv t0;
2345 if (is_fast_jcc_case(s, b)) {
2346 /* nominal case: we use a jump */
2347 /* XXX: make it faster by adding new instructions in TCG */
2348 t0 = tcg_temp_local_new();
2349 tcg_gen_movi_tl(t0, 0);
2350 l1 = gen_new_label();
2351 gen_jcc1(s, s->cc_op, b ^ 1, l1);
2352 tcg_gen_movi_tl(t0, 1);
2353 gen_set_label(l1);
2354 tcg_gen_mov_tl(cpu_T[0], t0);
2355 tcg_temp_free(t0);
2356 } else {
2357 /* slow case: it is more efficient not to generate a jump,
2358 although it is questionable whether this optimization is
2359 worthwhile */
2360 inv = b & 1;
2361 jcc_op = (b >> 1) & 7;
2362 gen_setcc_slow_T0(s, jcc_op);
2363 if (inv) {
2364 tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
2369 static inline void gen_op_movl_T0_seg(int seg_reg)
2371 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
2372 offsetof(CPUX86State,segs[seg_reg].selector));
2375 static inline void gen_op_movl_seg_T0_vm(int seg_reg)
2377 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
2378 tcg_gen_st32_tl(cpu_T[0], cpu_env,
2379 offsetof(CPUX86State,segs[seg_reg].selector));
2380 tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
2381 tcg_gen_st_tl(cpu_T[0], cpu_env,
2382 offsetof(CPUX86State,segs[seg_reg].base));
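/* NOTE: this implements the real mode/vm86 rule that the segment base
   is simply the selector shifted left by 4 (e.g. selector 0x1234
   gives base 0x12340), so no descriptor fetch or permission check is
   needed and no helper call is generated. */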
2385 /* move T0 to seg_reg and compute if the CPU state may change. Never
2386 call this function with seg_reg == R_CS */
2387 static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
2389 if (s->pe && !s->vm86) {
2390 /* XXX: optimize by finding processor state dynamically */
2391 if (s->cc_op != CC_OP_DYNAMIC)
2392 gen_op_set_cc_op(s->cc_op);
2393 gen_jmp_im(cur_eip);
2394 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
2395 gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
2396 /* abort translation because the addseg value may change or
2397 because ss32 may change. For R_SS, translation must always
2398 stop as a special handling must be done to disable hardware
2399 interrupts for the next instruction */
2400 if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
2401 s->is_jmp = 3;
2402 } else {
2403 gen_op_movl_seg_T0_vm(seg_reg);
2404 if (seg_reg == R_SS)
2405 s->is_jmp = 3;
2409 static inline int svm_is_rep(int prefixes)
2411 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2414 static inline void
2415 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2416 uint32_t type, uint64_t param)
2418 /* no SVM activated; fast case */
2419 if (likely(!(s->flags & HF_SVMI_MASK)))
2420 return;
2421 if (s->cc_op != CC_OP_DYNAMIC)
2422 gen_op_set_cc_op(s->cc_op);
2423 gen_jmp_im(pc_start - s->cs_base);
2424 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2425 tcg_const_i64(param));
2428 static inline void
2429 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2431 gen_svm_check_intercept_param(s, pc_start, type, 0);
2434 static inline void gen_stack_update(DisasContext *s, int addend)
2436 #ifdef TARGET_X86_64
2437 if (CODE64(s)) {
2438 gen_op_add_reg_im(2, R_ESP, addend);
2439 } else
2440 #endif
2441 if (s->ss32) {
2442 gen_op_add_reg_im(1, R_ESP, addend);
2443 } else {
2444 gen_op_add_reg_im(0, R_ESP, addend);
2448 /* generate a push. It depends on ss32, addseg and dflag */
2449 static void gen_push_T0(DisasContext *s)
2451 #ifdef TARGET_X86_64
2452 if (CODE64(s)) {
2453 gen_op_movq_A0_reg(R_ESP);
2454 if (s->dflag) {
2455 gen_op_addq_A0_im(-8);
2456 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2457 } else {
2458 gen_op_addq_A0_im(-2);
2459 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2461 gen_op_mov_reg_A0(2, R_ESP);
2462 } else
2463 #endif
2465 gen_op_movl_A0_reg(R_ESP);
2466 if (!s->dflag)
2467 gen_op_addl_A0_im(-2);
2468 else
2469 gen_op_addl_A0_im(-4);
2470 if (s->ss32) {
2471 if (s->addseg) {
2472 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2473 gen_op_addl_A0_seg(R_SS);
2475 } else {
2476 gen_op_andl_A0_ffff();
2477 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2478 gen_op_addl_A0_seg(R_SS);
2480 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2481 if (s->ss32 && !s->addseg)
2482 gen_op_mov_reg_A0(1, R_ESP);
2483 else
2484 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
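/* NOTE: the store width comes from dflag while the ESP write-back
   width comes from ss32; the two are independent, which is why T1
   keeps the unsegmented stack pointer value whenever a segment base
   has been added to A0. */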
2488 /* generate a push. It depends on ss32, addseg and dflag */
2489 /* slower version for T1, only used for call Ev */
2490 static void gen_push_T1(DisasContext *s)
2492 #ifdef TARGET_X86_64
2493 if (CODE64(s)) {
2494 gen_op_movq_A0_reg(R_ESP);
2495 if (s->dflag) {
2496 gen_op_addq_A0_im(-8);
2497 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2498 } else {
2499 gen_op_addq_A0_im(-2);
2500 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2502 gen_op_mov_reg_A0(2, R_ESP);
2503 } else
2504 #endif
2506 gen_op_movl_A0_reg(R_ESP);
2507 if (!s->dflag)
2508 gen_op_addl_A0_im(-2);
2509 else
2510 gen_op_addl_A0_im(-4);
2511 if (s->ss32) {
2512 if (s->addseg) {
2513 gen_op_addl_A0_seg(R_SS);
2515 } else {
2516 gen_op_andl_A0_ffff();
2517 gen_op_addl_A0_seg(R_SS);
2519 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2521 if (s->ss32 && !s->addseg)
2522 gen_op_mov_reg_A0(1, R_ESP);
2523 else
2524 gen_stack_update(s, (-2) << s->dflag);
2528 /* two step pop is necessary for precise exceptions */
2529 static void gen_pop_T0(DisasContext *s)
2531 #ifdef TARGET_X86_64
2532 if (CODE64(s)) {
2533 gen_op_movq_A0_reg(R_ESP);
2534 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2535 } else
2536 #endif
2538 gen_op_movl_A0_reg(R_ESP);
2539 if (s->ss32) {
2540 if (s->addseg)
2541 gen_op_addl_A0_seg(R_SS);
2542 } else {
2543 gen_op_andl_A0_ffff();
2544 gen_op_addl_A0_seg(R_SS);
2546 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2550 static void gen_pop_update(DisasContext *s)
2552 #ifdef TARGET_X86_64
2553 if (CODE64(s) && s->dflag) {
2554 gen_stack_update(s, 8);
2555 } else
2556 #endif
2558 gen_stack_update(s, 2 << s->dflag);
2562 static void gen_stack_A0(DisasContext *s)
2564 gen_op_movl_A0_reg(R_ESP);
2565 if (!s->ss32)
2566 gen_op_andl_A0_ffff();
2567 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2568 if (s->addseg)
2569 gen_op_addl_A0_seg(R_SS);
2572 /* NOTE: wrap around in 16 bit not fully handled */
2573 static void gen_pusha(DisasContext *s)
2575 int i;
2576 gen_op_movl_A0_reg(R_ESP);
2577 gen_op_addl_A0_im(-16 << s->dflag);
2578 if (!s->ss32)
2579 gen_op_andl_A0_ffff();
2580 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2581 if (s->addseg)
2582 gen_op_addl_A0_seg(R_SS);
2583 for (i = 0; i < 8; i++) {
2584 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2585 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2586 gen_op_addl_A0_im(2 << s->dflag);
2588 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2591 /* NOTE: wrap around in 16 bit not fully handled */
2592 static void gen_popa(DisasContext *s)
2594 int i;
2595 gen_op_movl_A0_reg(R_ESP);
2596 if (!s->ss32)
2597 gen_op_andl_A0_ffff();
2598 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2599 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2600 if (s->addseg)
2601 gen_op_addl_A0_seg(R_SS);
2602 for (i = 0; i < 8; i++) {
2603 /* ESP is not reloaded */
2604 if (i != 3) {
2605 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2606 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2608 gen_op_addl_A0_im(2 << s->dflag);
2610 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2613 static void gen_enter(DisasContext *s, int esp_addend, int level)
2615 int ot, opsize;
2617 level &= 0x1f;
2618 #ifdef TARGET_X86_64
2619 if (CODE64(s)) {
2620 ot = s->dflag ? OT_QUAD : OT_WORD;
2621 opsize = 1 << ot;
2623 gen_op_movl_A0_reg(R_ESP);
2624 gen_op_addq_A0_im(-opsize);
2625 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2627 /* push bp */
2628 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2629 gen_op_st_T0_A0(ot + s->mem_index);
2630 if (level) {
2631 /* XXX: must save state */
2632 gen_helper_enter64_level(tcg_const_i32(level),
2633 tcg_const_i32((ot == OT_QUAD)),
2634 cpu_T[1]);
2636 gen_op_mov_reg_T1(ot, R_EBP);
2637 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2638 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2639 } else
2640 #endif
2642 ot = s->dflag + OT_WORD;
2643 opsize = 2 << s->dflag;
2645 gen_op_movl_A0_reg(R_ESP);
2646 gen_op_addl_A0_im(-opsize);
2647 if (!s->ss32)
2648 gen_op_andl_A0_ffff();
2649 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2650 if (s->addseg)
2651 gen_op_addl_A0_seg(R_SS);
2652 /* push bp */
2653 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2654 gen_op_st_T0_A0(ot + s->mem_index);
2655 if (level) {
2656 /* XXX: must save state */
2657 gen_helper_enter_level(tcg_const_i32(level),
2658 tcg_const_i32(s->dflag),
2659 cpu_T[1]);
2661 gen_op_mov_reg_T1(ot, R_EBP);
2662 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2663 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
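/* NOTE: in outline this is the architectural ENTER sequence: push
   EBP, let the helper copy the level-1 outer frame pointers, load
   EBP with the new frame base, then lower ESP by esp_addend plus the
   copied frame pointer area. */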
2667 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2669 if (s->cc_op != CC_OP_DYNAMIC)
2670 gen_op_set_cc_op(s->cc_op);
2671 gen_jmp_im(cur_eip);
2672 gen_helper_raise_exception(tcg_const_i32(trapno));
2673 s->is_jmp = 3;
2676 /* an interrupt is different from an exception because of the
2677 privilege checks */
2678 static void gen_interrupt(DisasContext *s, int intno,
2679 target_ulong cur_eip, target_ulong next_eip)
2681 if (s->cc_op != CC_OP_DYNAMIC)
2682 gen_op_set_cc_op(s->cc_op);
2683 gen_jmp_im(cur_eip);
2684 gen_helper_raise_interrupt(tcg_const_i32(intno),
2685 tcg_const_i32(next_eip - cur_eip));
2686 s->is_jmp = 3;
2689 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2691 if (s->cc_op != CC_OP_DYNAMIC)
2692 gen_op_set_cc_op(s->cc_op);
2693 gen_jmp_im(cur_eip);
2694 gen_helper_debug();
2695 s->is_jmp = 3;
2698 /* generate a generic end of block. Trace exception is also generated
2699 if needed */
2700 static void gen_eob(DisasContext *s)
2702 if (s->cc_op != CC_OP_DYNAMIC)
2703 gen_op_set_cc_op(s->cc_op);
2704 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2705 gen_helper_reset_inhibit_irq();
2707 if (s->tb->flags & HF_RF_MASK) {
2708 gen_helper_reset_rf();
2710 if (s->singlestep_enabled) {
2711 gen_helper_debug();
2712 } else if (s->tf) {
2713 gen_helper_single_step();
2714 } else {
2715 tcg_gen_exit_tb(0);
2717 s->is_jmp = 3;
2720 /* generate a jump to eip. No segment change must happen before as a
2721 direct call to the next block may occur */
2722 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2724 if (s->jmp_opt) {
2725 if (s->cc_op != CC_OP_DYNAMIC) {
2726 gen_op_set_cc_op(s->cc_op);
2727 s->cc_op = CC_OP_DYNAMIC;
2729 gen_goto_tb(s, tb_num, eip);
2730 s->is_jmp = 3;
2731 } else {
2732 gen_jmp_im(eip);
2733 gen_eob(s);
2737 static void gen_jmp(DisasContext *s, target_ulong eip)
2739 gen_jmp_tb(s, eip, 0);
2742 static inline void gen_ldq_env_A0(int idx, int offset)
2744 int mem_index = (idx >> 2) - 1;
2745 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2746 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2749 static inline void gen_stq_env_A0(int idx, int offset)
2751 int mem_index = (idx >> 2) - 1;
2752 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2753 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2756 static inline void gen_ldo_env_A0(int idx, int offset)
2758 int mem_index = (idx >> 2) - 1;
2759 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2760 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2761 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2762 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2763 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2766 static inline void gen_sto_env_A0(int idx, int offset)
2768 int mem_index = (idx >> 2) - 1;
2769 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2770 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2771 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2772 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2773 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
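/* NOTE: 128-bit XMM loads and stores are implemented as two 64-bit
   guest memory accesses (low quadword first), so they provide no
   atomicity guarantee beyond that of the individual halves. */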
2776 static inline void gen_op_movo(int d_offset, int s_offset)
2778 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2779 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2780 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2781 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2784 static inline void gen_op_movq(int d_offset, int s_offset)
2786 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2787 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2790 static inline void gen_op_movl(int d_offset, int s_offset)
2792 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2793 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2796 static inline void gen_op_movq_env_0(int d_offset)
2798 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2799 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2802 #define SSE_SPECIAL ((void *)1)
2803 #define SSE_DUMMY ((void *)2)
2805 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2806 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2807 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
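/* NOTE: sse_op_table1 is indexed as [opcode byte][b1], where b1
   encodes the mandatory prefix seen by gen_sse(): 0 = none, 1 = 0x66,
   2 = 0xF3, 3 = 0xF2. For example 0F 58 (addps) selects entry
   [0x58][0] while 66 0F 58 (addpd) selects entry [0x58][1]. */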
2809 static void *sse_op_table1[256][4] = {
2810 /* 3DNow! extensions */
2811 [0x0e] = { SSE_DUMMY }, /* femms */
2812 [0x0f] = { SSE_DUMMY }, /* pf... */
2813 /* pure SSE operations */
2814 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2815 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2816 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2817 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2818 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2819 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2820 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2821 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2823 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2824 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2825 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2826 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
2827 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2828 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2829 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2830 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2831 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2832 [0x51] = SSE_FOP(sqrt),
2833 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2834 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2835 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2836 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2837 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2838 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2839 [0x58] = SSE_FOP(add),
2840 [0x59] = SSE_FOP(mul),
2841 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2842 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2843 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2844 [0x5c] = SSE_FOP(sub),
2845 [0x5d] = SSE_FOP(min),
2846 [0x5e] = SSE_FOP(div),
2847 [0x5f] = SSE_FOP(max),
2849 [0xc2] = SSE_FOP(cmpeq),
2850 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
2852 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2853 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2855 /* MMX ops and their SSE extensions */
2856 [0x60] = MMX_OP2(punpcklbw),
2857 [0x61] = MMX_OP2(punpcklwd),
2858 [0x62] = MMX_OP2(punpckldq),
2859 [0x63] = MMX_OP2(packsswb),
2860 [0x64] = MMX_OP2(pcmpgtb),
2861 [0x65] = MMX_OP2(pcmpgtw),
2862 [0x66] = MMX_OP2(pcmpgtl),
2863 [0x67] = MMX_OP2(packuswb),
2864 [0x68] = MMX_OP2(punpckhbw),
2865 [0x69] = MMX_OP2(punpckhwd),
2866 [0x6a] = MMX_OP2(punpckhdq),
2867 [0x6b] = MMX_OP2(packssdw),
2868 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2869 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2870 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2871 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2872 [0x70] = { gen_helper_pshufw_mmx,
2873 gen_helper_pshufd_xmm,
2874 gen_helper_pshufhw_xmm,
2875 gen_helper_pshuflw_xmm },
2876 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2877 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2878 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2879 [0x74] = MMX_OP2(pcmpeqb),
2880 [0x75] = MMX_OP2(pcmpeqw),
2881 [0x76] = MMX_OP2(pcmpeql),
2882 [0x77] = { SSE_DUMMY }, /* emms */
2883 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2884 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2885 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2886 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2887 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2888 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2889 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2890 [0xd1] = MMX_OP2(psrlw),
2891 [0xd2] = MMX_OP2(psrld),
2892 [0xd3] = MMX_OP2(psrlq),
2893 [0xd4] = MMX_OP2(paddq),
2894 [0xd5] = MMX_OP2(pmullw),
2895 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2896 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2897 [0xd8] = MMX_OP2(psubusb),
2898 [0xd9] = MMX_OP2(psubusw),
2899 [0xda] = MMX_OP2(pminub),
2900 [0xdb] = MMX_OP2(pand),
2901 [0xdc] = MMX_OP2(paddusb),
2902 [0xdd] = MMX_OP2(paddusw),
2903 [0xde] = MMX_OP2(pmaxub),
2904 [0xdf] = MMX_OP2(pandn),
2905 [0xe0] = MMX_OP2(pavgb),
2906 [0xe1] = MMX_OP2(psraw),
2907 [0xe2] = MMX_OP2(psrad),
2908 [0xe3] = MMX_OP2(pavgw),
2909 [0xe4] = MMX_OP2(pmulhuw),
2910 [0xe5] = MMX_OP2(pmulhw),
2911 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2912 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2913 [0xe8] = MMX_OP2(psubsb),
2914 [0xe9] = MMX_OP2(psubsw),
2915 [0xea] = MMX_OP2(pminsw),
2916 [0xeb] = MMX_OP2(por),
2917 [0xec] = MMX_OP2(paddsb),
2918 [0xed] = MMX_OP2(paddsw),
2919 [0xee] = MMX_OP2(pmaxsw),
2920 [0xef] = MMX_OP2(pxor),
2921 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2922 [0xf1] = MMX_OP2(psllw),
2923 [0xf2] = MMX_OP2(pslld),
2924 [0xf3] = MMX_OP2(psllq),
2925 [0xf4] = MMX_OP2(pmuludq),
2926 [0xf5] = MMX_OP2(pmaddwd),
2927 [0xf6] = MMX_OP2(psadbw),
2928 [0xf7] = MMX_OP2(maskmov),
2929 [0xf8] = MMX_OP2(psubb),
2930 [0xf9] = MMX_OP2(psubw),
2931 [0xfa] = MMX_OP2(psubl),
2932 [0xfb] = MMX_OP2(psubq),
2933 [0xfc] = MMX_OP2(paddb),
2934 [0xfd] = MMX_OP2(paddw),
2935 [0xfe] = MMX_OP2(paddl),
2938 static void *sse_op_table2[3 * 8][2] = {
2939 [0 + 2] = MMX_OP2(psrlw),
2940 [0 + 4] = MMX_OP2(psraw),
2941 [0 + 6] = MMX_OP2(psllw),
2942 [8 + 2] = MMX_OP2(psrld),
2943 [8 + 4] = MMX_OP2(psrad),
2944 [8 + 6] = MMX_OP2(pslld),
2945 [16 + 2] = MMX_OP2(psrlq),
2946 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2947 [16 + 6] = MMX_OP2(psllq),
2948 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
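/* NOTE: sse_op_table2 is indexed by ((b - 1) & 3) * 8 + reg: the
   0x71/0x72/0x73 group opcode selects the word/dword/qword row and
   the modrm reg field selects the shift (2 = srl, 4 = sra, 6 = sll;
   3 and 7 are the xmm-only byte shifts psrldq/pslldq). */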
2951 static void *sse_op_table3[4 * 3] = {
2952 gen_helper_cvtsi2ss,
2953 gen_helper_cvtsi2sd,
2954 X86_64_ONLY(gen_helper_cvtsq2ss),
2955 X86_64_ONLY(gen_helper_cvtsq2sd),
2957 gen_helper_cvttss2si,
2958 gen_helper_cvttsd2si,
2959 X86_64_ONLY(gen_helper_cvttss2sq),
2960 X86_64_ONLY(gen_helper_cvttsd2sq),
2962 gen_helper_cvtss2si,
2963 gen_helper_cvtsd2si,
2964 X86_64_ONLY(gen_helper_cvtss2sq),
2965 X86_64_ONLY(gen_helper_cvtsd2sq),
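/* NOTE: sse_op_table3 holds three groups of four converters
   (cvtsi2xx, cvttxx2si, cvtxx2si); within a group the index is
   (64 bit operand ? 2 : 0) + (sd variant ? 1 : 0), so the
   X86_64_ONLY slots are only reachable with REX.W set. */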
2968 static void *sse_op_table4[8][4] = {
2969 SSE_FOP(cmpeq),
2970 SSE_FOP(cmplt),
2971 SSE_FOP(cmple),
2972 SSE_FOP(cmpunord),
2973 SSE_FOP(cmpneq),
2974 SSE_FOP(cmpnlt),
2975 SSE_FOP(cmpnle),
2976 SSE_FOP(cmpord),
2979 static void *sse_op_table5[256] = {
2980 [0x0c] = gen_helper_pi2fw,
2981 [0x0d] = gen_helper_pi2fd,
2982 [0x1c] = gen_helper_pf2iw,
2983 [0x1d] = gen_helper_pf2id,
2984 [0x8a] = gen_helper_pfnacc,
2985 [0x8e] = gen_helper_pfpnacc,
2986 [0x90] = gen_helper_pfcmpge,
2987 [0x94] = gen_helper_pfmin,
2988 [0x96] = gen_helper_pfrcp,
2989 [0x97] = gen_helper_pfrsqrt,
2990 [0x9a] = gen_helper_pfsub,
2991 [0x9e] = gen_helper_pfadd,
2992 [0xa0] = gen_helper_pfcmpgt,
2993 [0xa4] = gen_helper_pfmax,
2994 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2995 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2996 [0xaa] = gen_helper_pfsubr,
2997 [0xae] = gen_helper_pfacc,
2998 [0xb0] = gen_helper_pfcmpeq,
2999 [0xb4] = gen_helper_pfmul,
3000 [0xb6] = gen_helper_movq, /* pfrcpit2 */
3001 [0xb7] = gen_helper_pmulhrw_mmx,
3002 [0xbb] = gen_helper_pswapd,
3003 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
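/* NOTE: all 3DNow! instructions share the 0F 0F /r opcode and encode
   the actual operation in a trailing imm8, which is why this table is
   indexed by that suffix byte (see the 0x0f case in gen_sse()). */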
3006 struct sse_op_helper_s {
3007 void *op[2]; uint32_t ext_mask;
3009 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
3010 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
3011 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
3012 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
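/* NOTE: the 0F 38 (table6) and 0F 3A (table7) maps carry an ext_mask
   so each entry can be gated on the matching CPUID.01H:ECX feature
   bit; an opcode present in the table but absent from
   s->cpuid_ext_features still raises #UD. */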
3013 static struct sse_op_helper_s sse_op_table6[256] = {
3014 [0x00] = SSSE3_OP(pshufb),
3015 [0x01] = SSSE3_OP(phaddw),
3016 [0x02] = SSSE3_OP(phaddd),
3017 [0x03] = SSSE3_OP(phaddsw),
3018 [0x04] = SSSE3_OP(pmaddubsw),
3019 [0x05] = SSSE3_OP(phsubw),
3020 [0x06] = SSSE3_OP(phsubd),
3021 [0x07] = SSSE3_OP(phsubsw),
3022 [0x08] = SSSE3_OP(psignb),
3023 [0x09] = SSSE3_OP(psignw),
3024 [0x0a] = SSSE3_OP(psignd),
3025 [0x0b] = SSSE3_OP(pmulhrsw),
3026 [0x10] = SSE41_OP(pblendvb),
3027 [0x14] = SSE41_OP(blendvps),
3028 [0x15] = SSE41_OP(blendvpd),
3029 [0x17] = SSE41_OP(ptest),
3030 [0x1c] = SSSE3_OP(pabsb),
3031 [0x1d] = SSSE3_OP(pabsw),
3032 [0x1e] = SSSE3_OP(pabsd),
3033 [0x20] = SSE41_OP(pmovsxbw),
3034 [0x21] = SSE41_OP(pmovsxbd),
3035 [0x22] = SSE41_OP(pmovsxbq),
3036 [0x23] = SSE41_OP(pmovsxwd),
3037 [0x24] = SSE41_OP(pmovsxwq),
3038 [0x25] = SSE41_OP(pmovsxdq),
3039 [0x28] = SSE41_OP(pmuldq),
3040 [0x29] = SSE41_OP(pcmpeqq),
3041 [0x2a] = SSE41_SPECIAL, /* movntdqa */
3042 [0x2b] = SSE41_OP(packusdw),
3043 [0x30] = SSE41_OP(pmovzxbw),
3044 [0x31] = SSE41_OP(pmovzxbd),
3045 [0x32] = SSE41_OP(pmovzxbq),
3046 [0x33] = SSE41_OP(pmovzxwd),
3047 [0x34] = SSE41_OP(pmovzxwq),
3048 [0x35] = SSE41_OP(pmovzxdq),
3049 [0x37] = SSE42_OP(pcmpgtq),
3050 [0x38] = SSE41_OP(pminsb),
3051 [0x39] = SSE41_OP(pminsd),
3052 [0x3a] = SSE41_OP(pminuw),
3053 [0x3b] = SSE41_OP(pminud),
3054 [0x3c] = SSE41_OP(pmaxsb),
3055 [0x3d] = SSE41_OP(pmaxsd),
3056 [0x3e] = SSE41_OP(pmaxuw),
3057 [0x3f] = SSE41_OP(pmaxud),
3058 [0x40] = SSE41_OP(pmulld),
3059 [0x41] = SSE41_OP(phminposuw),
3062 static struct sse_op_helper_s sse_op_table7[256] = {
3063 [0x08] = SSE41_OP(roundps),
3064 [0x09] = SSE41_OP(roundpd),
3065 [0x0a] = SSE41_OP(roundss),
3066 [0x0b] = SSE41_OP(roundsd),
3067 [0x0c] = SSE41_OP(blendps),
3068 [0x0d] = SSE41_OP(blendpd),
3069 [0x0e] = SSE41_OP(pblendw),
3070 [0x0f] = SSSE3_OP(palignr),
3071 [0x14] = SSE41_SPECIAL, /* pextrb */
3072 [0x15] = SSE41_SPECIAL, /* pextrw */
3073 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
3074 [0x17] = SSE41_SPECIAL, /* extractps */
3075 [0x20] = SSE41_SPECIAL, /* pinsrb */
3076 [0x21] = SSE41_SPECIAL, /* insertps */
3077 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
3078 [0x40] = SSE41_OP(dpps),
3079 [0x41] = SSE41_OP(dppd),
3080 [0x42] = SSE41_OP(mpsadbw),
3081 [0x60] = SSE42_OP(pcmpestrm),
3082 [0x61] = SSE42_OP(pcmpestri),
3083 [0x62] = SSE42_OP(pcmpistrm),
3084 [0x63] = SSE42_OP(pcmpistri),
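/* NOTE: every table7 (0F 3A) operation takes an imm8 after its
   register/memory operand; gen_sse() fetches that byte into 'val' and
   hands it to the helper as an i32 constant. */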
3087 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3089 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3090 int modrm, mod, rm, reg, reg_addr, offset_addr;
3091 void *sse_op2;
3093 b &= 0xff;
3094 if (s->prefix & PREFIX_DATA)
3095 b1 = 1;
3096 else if (s->prefix & PREFIX_REPZ)
3097 b1 = 2;
3098 else if (s->prefix & PREFIX_REPNZ)
3099 b1 = 3;
3100 else
3101 b1 = 0;
3102 sse_op2 = sse_op_table1[b][b1];
3103 if (!sse_op2)
3104 goto illegal_op;
3105 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3106 is_xmm = 1;
3107 } else {
3108 if (b1 == 0) {
3109 /* MMX case */
3110 is_xmm = 0;
3111 } else {
3112 is_xmm = 1;
3115 /* simple MMX/SSE operation */
3116 if (s->flags & HF_TS_MASK) {
3117 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3118 return;
3120 if (s->flags & HF_EM_MASK) {
3121 illegal_op:
3122 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3123 return;
3125 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3126 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3127 goto illegal_op;
3128 if (b == 0x0e) {
3129 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3130 goto illegal_op;
3131 /* femms */
3132 gen_helper_emms();
3133 return;
3135 if (b == 0x77) {
3136 /* emms */
3137 gen_helper_emms();
3138 return;
3140 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3141 the static cpu state) */
3142 if (!is_xmm) {
3143 gen_helper_enter_mmx();
3146 modrm = ldub_code(s->pc++);
3147 reg = ((modrm >> 3) & 7);
3148 if (is_xmm)
3149 reg |= rex_r;
3150 mod = (modrm >> 6) & 3;
3151 if (sse_op2 == SSE_SPECIAL) {
3152 b |= (b1 << 8);
3153 switch(b) {
3154 case 0x0e7: /* movntq */
3155 if (mod == 3)
3156 goto illegal_op;
3157 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3158 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3159 break;
3160 case 0x1e7: /* movntdq */
3161 case 0x02b: /* movntps */
3162 case 0x12b: /* movntpd */
3163 case 0x3f0: /* lddqu */
3164 if (mod == 3)
3165 goto illegal_op;
3166 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3167 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3168 break;
3169 case 0x6e: /* movd mm, ea */
3170 #ifdef TARGET_X86_64
3171 if (s->dflag == 2) {
3172 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3173 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3174 } else
3175 #endif
3177 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3178 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3179 offsetof(CPUX86State,fpregs[reg].mmx));
3180 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3181 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3183 break;
3184 case 0x16e: /* movd xmm, ea */
3185 #ifdef TARGET_X86_64
3186 if (s->dflag == 2) {
3187 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3188 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3189 offsetof(CPUX86State,xmm_regs[reg]));
3190 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3191 } else
3192 #endif
3194 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3195 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3196 offsetof(CPUX86State,xmm_regs[reg]));
3197 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3198 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3200 break;
3201 case 0x6f: /* movq mm, ea */
3202 if (mod != 3) {
3203 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3204 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3205 } else {
3206 rm = (modrm & 7);
3207 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3208 offsetof(CPUX86State,fpregs[rm].mmx));
3209 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3210 offsetof(CPUX86State,fpregs[reg].mmx));
3212 break;
3213 case 0x010: /* movups */
3214 case 0x110: /* movupd */
3215 case 0x028: /* movaps */
3216 case 0x128: /* movapd */
3217 case 0x16f: /* movdqa xmm, ea */
3218 case 0x26f: /* movdqu xmm, ea */
3219 if (mod != 3) {
3220 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3221 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3222 } else {
3223 rm = (modrm & 7) | REX_B(s);
3224 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3225 offsetof(CPUX86State,xmm_regs[rm]));
3227 break;
3228 case 0x210: /* movss xmm, ea */
3229 if (mod != 3) {
3230 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3231 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3232 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3233 gen_op_movl_T0_0();
3234 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3235 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3236 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3237 } else {
3238 rm = (modrm & 7) | REX_B(s);
3239 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3240 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3242 break;
3243 case 0x310: /* movsd xmm, ea */
3244 if (mod != 3) {
3245 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3246 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3247 gen_op_movl_T0_0();
3248 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3249 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3250 } else {
3251 rm = (modrm & 7) | REX_B(s);
3252 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3253 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3255 break;
3256 case 0x012: /* movlps */
3257 case 0x112: /* movlpd */
3258 if (mod != 3) {
3259 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3260 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3261 } else {
3262 /* movhlps */
3263 rm = (modrm & 7) | REX_B(s);
3264 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3265 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3267 break;
3268 case 0x212: /* movsldup */
3269 if (mod != 3) {
3270 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3271 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3272 } else {
3273 rm = (modrm & 7) | REX_B(s);
3274 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3275 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3276 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3277 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3279 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3280 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3281 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3282 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3283 break;
3284 case 0x312: /* movddup */
3285 if (mod != 3) {
3286 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3287 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3288 } else {
3289 rm = (modrm & 7) | REX_B(s);
3290 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3291 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3293 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3294 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3295 break;
3296 case 0x016: /* movhps */
3297 case 0x116: /* movhpd */
3298 if (mod != 3) {
3299 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3300 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3301 } else {
3302 /* movlhps */
3303 rm = (modrm & 7) | REX_B(s);
3304 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3305 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3307 break;
3308 case 0x216: /* movshdup */
3309 if (mod != 3) {
3310 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3311 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3312 } else {
3313 rm = (modrm & 7) | REX_B(s);
3314 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3315 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3316 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3317 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3319 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3320 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3321 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3322 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3323 break;
3324 case 0x7e: /* movd ea, mm */
3325 #ifdef TARGET_X86_64
3326 if (s->dflag == 2) {
3327 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3328 offsetof(CPUX86State,fpregs[reg].mmx));
3329 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3330 } else
3331 #endif
3333 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3334 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3335 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3337 break;
3338 case 0x17e: /* movd ea, xmm */
3339 #ifdef TARGET_X86_64
3340 if (s->dflag == 2) {
3341 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3342 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3343 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3344 } else
3345 #endif
3347 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3348 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3349 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3351 break;
3352 case 0x27e: /* movq xmm, ea */
3353 if (mod != 3) {
3354 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3355 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3356 } else {
3357 rm = (modrm & 7) | REX_B(s);
3358 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3359 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3361 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3362 break;
3363 case 0x7f: /* movq ea, mm */
3364 if (mod != 3) {
3365 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3366 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3367 } else {
3368 rm = (modrm & 7);
3369 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3370 offsetof(CPUX86State,fpregs[reg].mmx));
3372 break;
3373 case 0x011: /* movups */
3374 case 0x111: /* movupd */
3375 case 0x029: /* movaps */
3376 case 0x129: /* movapd */
3377 case 0x17f: /* movdqa ea, xmm */
3378 case 0x27f: /* movdqu ea, xmm */
3379 if (mod != 3) {
3380 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3381 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3382 } else {
3383 rm = (modrm & 7) | REX_B(s);
3384 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3385 offsetof(CPUX86State,xmm_regs[reg]));
3387 break;
3388 case 0x211: /* movss ea, xmm */
3389 if (mod != 3) {
3390 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3391 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3392 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3393 } else {
3394 rm = (modrm & 7) | REX_B(s);
3395 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3396 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3398 break;
3399 case 0x311: /* movsd ea, xmm */
3400 if (mod != 3) {
3401 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3402 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3403 } else {
3404 rm = (modrm & 7) | REX_B(s);
3405 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3406 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3408 break;
3409 case 0x013: /* movlps */
3410 case 0x113: /* movlpd */
3411 if (mod != 3) {
3412 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3413 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3414 } else {
3415 goto illegal_op;
3417 break;
3418 case 0x017: /* movhps */
3419 case 0x117: /* movhpd */
3420 if (mod != 3) {
3421 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3422 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3423 } else {
3424 goto illegal_op;
3426 break;
3427 case 0x71: /* shift mm, im */
3428 case 0x72:
3429 case 0x73:
3430 case 0x171: /* shift xmm, im */
3431 case 0x172:
3432 case 0x173:
3433 val = ldub_code(s->pc++);
3434 if (is_xmm) {
3435 gen_op_movl_T0_im(val);
3436 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3437 gen_op_movl_T0_0();
3438 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3439 op1_offset = offsetof(CPUX86State,xmm_t0);
3440 } else {
3441 gen_op_movl_T0_im(val);
3442 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3443 gen_op_movl_T0_0();
3444 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3445 op1_offset = offsetof(CPUX86State,mmx_t0);
3447 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + ((modrm >> 3) & 7)][b1];
3448 if (!sse_op2)
3449 goto illegal_op;
3450 if (is_xmm) {
3451 rm = (modrm & 7) | REX_B(s);
3452 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3453 } else {
3454 rm = (modrm & 7);
3455 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3457 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3458 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3459 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3460 break;
3461 case 0x050: /* movmskps */
3462 rm = (modrm & 7) | REX_B(s);
3463 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3464 offsetof(CPUX86State,xmm_regs[rm]));
3465 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3466 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3467 gen_op_mov_reg_T0(OT_LONG, reg);
3468 break;
3469 case 0x150: /* movmskpd */
3470 rm = (modrm & 7) | REX_B(s);
3471 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3472 offsetof(CPUX86State,xmm_regs[rm]));
3473 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3474 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3475 gen_op_mov_reg_T0(OT_LONG, reg);
3476 break;
3477 case 0x02a: /* cvtpi2ps */
3478 case 0x12a: /* cvtpi2pd */
3479 gen_helper_enter_mmx();
3480 if (mod != 3) {
3481 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3482 op2_offset = offsetof(CPUX86State,mmx_t0);
3483 gen_ldq_env_A0(s->mem_index, op2_offset);
3484 } else {
3485 rm = (modrm & 7);
3486 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3488 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3489 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3490 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3491 switch(b >> 8) {
3492 case 0x0:
3493 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3494 break;
3495 default:
3496 case 0x1:
3497 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3498 break;
3500 break;
3501 case 0x22a: /* cvtsi2ss */
3502 case 0x32a: /* cvtsi2sd */
3503 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3504 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3505 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3506 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3507 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3508 if (ot == OT_LONG) {
3509 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3510 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3511 } else {
3512 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3514 break;
3515 case 0x02c: /* cvttps2pi */
3516 case 0x12c: /* cvttpd2pi */
3517 case 0x02d: /* cvtps2pi */
3518 case 0x12d: /* cvtpd2pi */
3519 gen_helper_enter_mmx();
3520 if (mod != 3) {
3521 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3522 op2_offset = offsetof(CPUX86State,xmm_t0);
3523 gen_ldo_env_A0(s->mem_index, op2_offset);
3524 } else {
3525 rm = (modrm & 7) | REX_B(s);
3526 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3528 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3529 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3530 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3531 switch(b) {
3532 case 0x02c:
3533 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3534 break;
3535 case 0x12c:
3536 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3537 break;
3538 case 0x02d:
3539 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3540 break;
3541 case 0x12d:
3542 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3543 break;
3545 break;
3546 case 0x22c: /* cvttss2si */
3547 case 0x32c: /* cvttsd2si */
3548 case 0x22d: /* cvtss2si */
3549 case 0x32d: /* cvtsd2si */
3550 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3551 if (mod != 3) {
3552 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3553 if ((b >> 8) & 1) {
3554 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3555 } else {
3556 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3557 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3559 op2_offset = offsetof(CPUX86State,xmm_t0);
3560 } else {
3561 rm = (modrm & 7) | REX_B(s);
3562 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3564 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3565 (b & 1) * 4];
3566 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3567 if (ot == OT_LONG) {
3568 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3569 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3570 } else {
3571 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3573 gen_op_mov_reg_T0(ot, reg);
3574 break;
3575 case 0xc4: /* pinsrw */
3576 case 0x1c4:
3577 s->rip_offset = 1;
3578 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3579 val = ldub_code(s->pc++);
3580 if (b1) {
3581 val &= 7;
3582 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3583 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3584 } else {
3585 val &= 3;
3586 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3587 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3589 break;
3590 case 0xc5: /* pextrw */
3591 case 0x1c5:
3592 if (mod != 3)
3593 goto illegal_op;
3594 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3595 val = ldub_code(s->pc++);
3596 if (b1) {
3597 val &= 7;
3598 rm = (modrm & 7) | REX_B(s);
3599 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3600 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3601 } else {
3602 val &= 3;
3603 rm = (modrm & 7);
3604 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3605 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3607 reg = ((modrm >> 3) & 7) | rex_r;
3608 gen_op_mov_reg_T0(ot, reg);
3609 break;
3610 case 0x1d6: /* movq ea, xmm */
3611 if (mod != 3) {
3612 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3613 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3614 } else {
3615 rm = (modrm & 7) | REX_B(s);
3616 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3617 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3618 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3620 break;
3621 case 0x2d6: /* movq2dq */
3622 gen_helper_enter_mmx();
3623 rm = (modrm & 7);
3624 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3625 offsetof(CPUX86State,fpregs[rm].mmx));
3626 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3627 break;
3628 case 0x3d6: /* movdq2q */
3629 gen_helper_enter_mmx();
3630 rm = (modrm & 7) | REX_B(s);
3631 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3632 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3633 break;
3634 case 0xd7: /* pmovmskb */
3635 case 0x1d7:
3636 if (mod != 3)
3637 goto illegal_op;
3638 if (b1) {
3639 rm = (modrm & 7) | REX_B(s);
3640 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3641 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3642 } else {
3643 rm = (modrm & 7);
3644 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3645 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3647 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3648 reg = ((modrm >> 3) & 7) | rex_r;
3649 gen_op_mov_reg_T0(OT_LONG, reg);
3650 break;
3651 case 0x138:
3652 if (s->prefix & PREFIX_REPNZ)
3653 goto crc32;
3654 case 0x038:
3655 b = modrm;
3656 modrm = ldub_code(s->pc++);
3657 rm = modrm & 7;
3658 reg = ((modrm >> 3) & 7) | rex_r;
3659 mod = (modrm >> 6) & 3;
3661 sse_op2 = sse_op_table6[b].op[b1];
3662 if (!sse_op2)
3663 goto illegal_op;
3664 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3665 goto illegal_op;
3667 if (b1) {
3668 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3669 if (mod == 3) {
3670 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3671 } else {
3672 op2_offset = offsetof(CPUX86State,xmm_t0);
3673 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3674 switch (b) {
3675 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3676 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3677 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3678 gen_ldq_env_A0(s->mem_index, op2_offset +
3679 offsetof(XMMReg, XMM_Q(0)));
3680 break;
3681 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3682 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3683 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3684 (s->mem_index >> 2) - 1);
3685 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3686 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3687 offsetof(XMMReg, XMM_L(0)));
3688 break;
3689 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3690 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3691 (s->mem_index >> 2) - 1);
3692 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3693 offsetof(XMMReg, XMM_W(0)));
3694 break;
3695 case 0x2a: /* movntdqa */
3696 gen_ldo_env_A0(s->mem_index, op1_offset);
3697 return;
3698 default:
3699 gen_ldo_env_A0(s->mem_index, op2_offset);
3702 } else {
3703 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3704 if (mod == 3) {
3705 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3706 } else {
3707 op2_offset = offsetof(CPUX86State,mmx_t0);
3708 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3709 gen_ldq_env_A0(s->mem_index, op2_offset);
3712 if (sse_op2 == SSE_SPECIAL)
3713 goto illegal_op;
3715 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3716 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3717 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3719 if (b == 0x17)
3720 s->cc_op = CC_OP_EFLAGS;
3721 break;
3722 case 0x338: /* crc32 */
3723 crc32:
3724 b = modrm;
3725 modrm = ldub_code(s->pc++);
3726 reg = ((modrm >> 3) & 7) | rex_r;
3728 if (b != 0xf0 && b != 0xf1)
3729 goto illegal_op;
3730 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3731 goto illegal_op;
3733 if (b == 0xf0)
3734 ot = OT_BYTE;
3735 else if (b == 0xf1 && s->dflag != 2)
3736 if (s->prefix & PREFIX_DATA)
3737 ot = OT_WORD;
3738 else
3739 ot = OT_LONG;
3740 else
3741 ot = OT_QUAD;
3743 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3744 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3745 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3746 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3747 cpu_T[0], tcg_const_i32(8 << ot));
3749 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3750 gen_op_mov_reg_T0(ot, reg);
3751 break;
3752 case 0x03a:
3753 case 0x13a:
3754 b = modrm;
3755 modrm = ldub_code(s->pc++);
3756 rm = modrm & 7;
3757 reg = ((modrm >> 3) & 7) | rex_r;
3758 mod = (modrm >> 6) & 3;
3760 sse_op2 = sse_op_table7[b].op[b1];
3761 if (!sse_op2)
3762 goto illegal_op;
3763 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3764 goto illegal_op;
3766 if (sse_op2 == SSE_SPECIAL) {
3767 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3768 rm = (modrm & 7) | REX_B(s);
3769 if (mod != 3)
3770 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3771 reg = ((modrm >> 3) & 7) | rex_r;
3772 val = ldub_code(s->pc++);
3773 switch (b) {
3774 case 0x14: /* pextrb */
3775 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3776 xmm_regs[reg].XMM_B(val & 15)));
3777 if (mod == 3)
3778 gen_op_mov_reg_T0(ot, rm);
3779 else
3780 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3781 (s->mem_index >> 2) - 1);
3782 break;
3783 case 0x15: /* pextrw */
3784 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3785 xmm_regs[reg].XMM_W(val & 7)));
3786 if (mod == 3)
3787 gen_op_mov_reg_T0(ot, rm);
3788 else
3789 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3790 (s->mem_index >> 2) - 1);
3791 break;
3792 case 0x16:
3793 if (ot == OT_LONG) { /* pextrd */
3794 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3795 offsetof(CPUX86State,
3796 xmm_regs[reg].XMM_L(val & 3)));
3797 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3798 if (mod == 3)
3799 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3800 else
3801 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3802 (s->mem_index >> 2) - 1);
3803 } else { /* pextrq */
3804 #ifdef TARGET_X86_64
3805 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3806 offsetof(CPUX86State,
3807 xmm_regs[reg].XMM_Q(val & 1)));
3808 if (mod == 3)
3809 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3810 else
3811 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3812 (s->mem_index >> 2) - 1);
3813 #else
3814 goto illegal_op;
3815 #endif
3817 break;
3818 case 0x17: /* extractps */
3819 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3820 xmm_regs[reg].XMM_L(val & 3)));
3821 if (mod == 3)
3822 gen_op_mov_reg_T0(ot, rm);
3823 else
3824 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3825 (s->mem_index >> 2) - 1);
3826 break;
3827 case 0x20: /* pinsrb */
3828 if (mod == 3)
3829 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3830 else
3831 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3832 (s->mem_index >> 2) - 1);
3833 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3834 xmm_regs[reg].XMM_B(val & 15)));
3835 break;
3836 case 0x21: /* insertps */
3837 if (mod == 3) {
3838 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3839 offsetof(CPUX86State,xmm_regs[rm]
3840 .XMM_L((val >> 6) & 3)));
3841 } else {
3842 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3843 (s->mem_index >> 2) - 1);
3844 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3846 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3847 offsetof(CPUX86State,xmm_regs[reg]
3848 .XMM_L((val >> 4) & 3)));
3849 if ((val >> 0) & 1)
3850 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3851 cpu_env, offsetof(CPUX86State,
3852 xmm_regs[reg].XMM_L(0)));
3853 if ((val >> 1) & 1)
3854 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3855 cpu_env, offsetof(CPUX86State,
3856 xmm_regs[reg].XMM_L(1)));
3857 if ((val >> 2) & 1)
3858 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3859 cpu_env, offsetof(CPUX86State,
3860 xmm_regs[reg].XMM_L(2)));
3861 if ((val >> 3) & 1)
3862 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3863 cpu_env, offsetof(CPUX86State,
3864 xmm_regs[reg].XMM_L(3)));
3865 break;
3866 case 0x22:
3867 if (ot == OT_LONG) { /* pinsrd */
3868 if (mod == 3)
3869 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3870 else
3871 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3872 (s->mem_index >> 2) - 1);
3873 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3874 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3875 offsetof(CPUX86State,
3876 xmm_regs[reg].XMM_L(val & 3)));
3877 } else { /* pinsrq */
3878 #ifdef TARGET_X86_64
3879 if (mod == 3)
3880 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3881 else
3882 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3883 (s->mem_index >> 2) - 1);
3884 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3885 offsetof(CPUX86State,
3886 xmm_regs[reg].XMM_Q(val & 1)));
3887 #else
3888 goto illegal_op;
3889 #endif
3891 break;
3893 return;
3896 if (b1) {
3897 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3898 if (mod == 3) {
3899 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3900 } else {
3901 op2_offset = offsetof(CPUX86State,xmm_t0);
3902 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3903 gen_ldo_env_A0(s->mem_index, op2_offset);
3905 } else {
3906 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3907 if (mod == 3) {
3908 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3909 } else {
3910 op2_offset = offsetof(CPUX86State,mmx_t0);
3911 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3912 gen_ldq_env_A0(s->mem_index, op2_offset);
3915 val = ldub_code(s->pc++);
3917 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3918 s->cc_op = CC_OP_EFLAGS;
3920 if (s->dflag == 2)
3921 /* The helper must use entire 64-bit gp registers */
3922 val |= 1 << 8;
3925 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3926 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3927 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3928 break;
3929 default:
3930 goto illegal_op;
3932 } else {
3933 /* generic MMX or SSE operation */
3934 switch(b) {
3935 case 0x70: /* pshufx insn */
3936 case 0xc6: /* shufps/shufpd */
3937 case 0xc2: /* compare insns */
3938 s->rip_offset = 1;
3939 break;
3940 default:
3941 break;
3943 if (is_xmm) {
3944 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3945 if (mod != 3) {
3946 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3947 op2_offset = offsetof(CPUX86State,xmm_t0);
3948 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3949 b == 0xc2)) {
3950 /* special case for scalar SSE instructions */
3951 if (b1 == 2) {
3952 /* 32 bit access */
3953 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3954 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3955 } else {
3956 /* 64 bit access */
3957 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
3959 } else {
3960 gen_ldo_env_A0(s->mem_index, op2_offset);
3962 } else {
3963 rm = (modrm & 7) | REX_B(s);
3964 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3966 } else {
3967 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3968 if (mod != 3) {
3969 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3970 op2_offset = offsetof(CPUX86State,mmx_t0);
3971 gen_ldq_env_A0(s->mem_index, op2_offset);
3972 } else {
3973 rm = (modrm & 7);
3974 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3977 switch(b) {
3978 case 0x0f: /* 3DNow! data insns */
3979 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3980 goto illegal_op;
3981 val = ldub_code(s->pc++);
3982 sse_op2 = sse_op_table5[val];
3983 if (!sse_op2)
3984 goto illegal_op;
3985 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3986 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3987 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3988 break;
3989 case 0x70: /* pshufx insn */
3990 case 0xc6: /* shufps/shufpd */
3991 val = ldub_code(s->pc++);
3992 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3993 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3994 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3995 break;
3996 case 0xc2:
3997 /* compare insns */
3998 val = ldub_code(s->pc++);
3999 if (val >= 8)
4000 goto illegal_op;
4001 sse_op2 = sse_op_table4[val][b1];
4002 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4003 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4004 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4005 break;
4006 case 0xf7:
4007 /* maskmov: we must prepare A0 (implicit DS:rDI operand) */
4008 if (mod != 3)
4009 goto illegal_op;
4010 #ifdef TARGET_X86_64
4011 if (s->aflag == 2) {
4012 gen_op_movq_A0_reg(R_EDI);
4013 } else
4014 #endif
4016 gen_op_movl_A0_reg(R_EDI);
4017 if (s->aflag == 0)
4018 gen_op_andl_A0_ffff();
4020 gen_add_A0_ds_seg(s);
4022 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4023 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4024 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
4025 break;
4026 default:
4027 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
4028 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
4029 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
4030 break;
4032 if (b == 0x2e || b == 0x2f) {
4033 s->cc_op = CC_OP_EFLAGS;
4038 /* convert one instruction. s->is_jmp is set if the translation must
4039 be stopped. Return the next pc value */
4040 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
4042 int b, prefixes, aflag, dflag;
4043 int shift, ot;
4044 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
4045 target_ulong next_eip, tval;
4046 int rex_w, rex_r;
4048 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
4049 tcg_gen_debug_insn_start(pc_start);
4050 s->pc = pc_start;
4051 prefixes = 0;
4052 aflag = s->code32;
4053 dflag = s->code32;
4054 s->override = -1;
4055 rex_w = -1;
4056 rex_r = 0;
4057 #ifdef TARGET_X86_64
4058 s->rex_x = 0;
4059 s->rex_b = 0;
4060 x86_64_hregs = 0;
4061 #endif
4062 s->rip_offset = 0; /* for relative ip address */
4063 next_byte:
4064 b = ldub_code(s->pc);
4065 s->pc++;
4066 /* check prefixes */
4067 #ifdef TARGET_X86_64
4068 if (CODE64(s)) {
4069 switch (b) {
4070 case 0xf3:
4071 prefixes |= PREFIX_REPZ;
4072 goto next_byte;
4073 case 0xf2:
4074 prefixes |= PREFIX_REPNZ;
4075 goto next_byte;
4076 case 0xf0:
4077 prefixes |= PREFIX_LOCK;
4078 goto next_byte;
4079 case 0x2e:
4080 s->override = R_CS;
4081 goto next_byte;
4082 case 0x36:
4083 s->override = R_SS;
4084 goto next_byte;
4085 case 0x3e:
4086 s->override = R_DS;
4087 goto next_byte;
4088 case 0x26:
4089 s->override = R_ES;
4090 goto next_byte;
4091 case 0x64:
4092 s->override = R_FS;
4093 goto next_byte;
4094 case 0x65:
4095 s->override = R_GS;
4096 goto next_byte;
4097 case 0x66:
4098 prefixes |= PREFIX_DATA;
4099 goto next_byte;
4100 case 0x67:
4101 prefixes |= PREFIX_ADR;
4102 goto next_byte;
4103 case 0x40 ... 0x4f:
4104 /* REX prefix */
4105 rex_w = (b >> 3) & 1;
4106 rex_r = (b & 0x4) << 1;
4107 s->rex_x = (b & 0x2) << 2;
4108 REX_B(s) = (b & 0x1) << 3;
4109 x86_64_hregs = 1; /* select uniform byte register addressing */
4110 goto next_byte;
4112 if (rex_w == 1) {
4113 /* 0x66 is ignored if rex.w is set */
4114 dflag = 2;
4115 } else {
4116 if (prefixes & PREFIX_DATA)
4117 dflag ^= 1;
4119 if (!(prefixes & PREFIX_ADR))
4120 aflag = 2;
4121 } else
4122 #endif
4124 switch (b) {
4125 case 0xf3:
4126 prefixes |= PREFIX_REPZ;
4127 goto next_byte;
4128 case 0xf2:
4129 prefixes |= PREFIX_REPNZ;
4130 goto next_byte;
4131 case 0xf0:
4132 prefixes |= PREFIX_LOCK;
4133 goto next_byte;
4134 case 0x2e:
4135 s->override = R_CS;
4136 goto next_byte;
4137 case 0x36:
4138 s->override = R_SS;
4139 goto next_byte;
4140 case 0x3e:
4141 s->override = R_DS;
4142 goto next_byte;
4143 case 0x26:
4144 s->override = R_ES;
4145 goto next_byte;
4146 case 0x64:
4147 s->override = R_FS;
4148 goto next_byte;
4149 case 0x65:
4150 s->override = R_GS;
4151 goto next_byte;
4152 case 0x66:
4153 prefixes |= PREFIX_DATA;
4154 goto next_byte;
4155 case 0x67:
4156 prefixes |= PREFIX_ADR;
4157 goto next_byte;
4159 if (prefixes & PREFIX_DATA)
4160 dflag ^= 1;
4161 if (prefixes & PREFIX_ADR)
4162 aflag ^= 1;
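/* NOTE: outside long mode 0x66 and 0x67 simply toggle the operand and
   address size relative to the code segment default, while in long
   mode REX.W forces 64-bit operands and makes a preceding 0x66
   irrelevant (address size defaults to 64 bits there). */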
4165 s->prefix = prefixes;
4166 s->aflag = aflag;
4167 s->dflag = dflag;
4169 /* lock generation */
4170 if (prefixes & PREFIX_LOCK)
4171 gen_helper_lock();
4173 /* now check op code */
4174 reswitch:
4175 switch(b) {
4176 case 0x0f:
4177 /**************************/
4178 /* extended op code */
4179 b = ldub_code(s->pc++) | 0x100;
4180 goto reswitch;
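4181 /* two byte opcodes (0x0f prefix) are remapped to 0x100..0x1ff and dispatched through this same switch */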
4182 /**************************/
4183 /* arith & logic */
4184 case 0x00 ... 0x05:
4185 case 0x08 ... 0x0d:
4186 case 0x10 ... 0x15:
4187 case 0x18 ... 0x1d:
4188 case 0x20 ... 0x25:
4189 case 0x28 ... 0x2d:
4190 case 0x30 ... 0x35:
4191 case 0x38 ... 0x3d:
4192 {
4193 int op, f, val;
4194 op = (b >> 3) & 7;
4195 f = (b >> 1) & 3;
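4196 /* for the 0x00..0x3d ALU block: op (bits 3..5) selects the operation, f the form: 0 = "Ev, Gv", 1 = "Gv, Ev", 2 = "AL/eAX, imm" */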
4197 if ((b & 1) == 0)
4198 ot = OT_BYTE;
4199 else
4200 ot = dflag + OT_WORD;
4202 switch(f) {
4203 case 0: /* OP Ev, Gv */
4204 modrm = ldub_code(s->pc++);
4205 reg = ((modrm >> 3) & 7) | rex_r;
4206 mod = (modrm >> 6) & 3;
4207 rm = (modrm & 7) | REX_B(s);
4208 if (mod != 3) {
4209 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4210 opreg = OR_TMP0;
4211 } else if (op == OP_XORL && rm == reg) {
4212 xor_zero:
4213 /* xor reg, reg optimisation */
4214 gen_op_movl_T0_0();
4215 s->cc_op = CC_OP_LOGICB + ot;
4216 gen_op_mov_reg_T0(ot, reg);
4217 gen_op_update1_cc();
4218 break;
4219 } else {
4220 opreg = rm;
4221 }
4222 gen_op_mov_TN_reg(ot, 1, reg);
4223 gen_op(s, op, ot, opreg);
4224 break;
4225 case 1: /* OP Gv, Ev */
4226 modrm = ldub_code(s->pc++);
4227 mod = (modrm >> 6) & 3;
4228 reg = ((modrm >> 3) & 7) | rex_r;
4229 rm = (modrm & 7) | REX_B(s);
4230 if (mod != 3) {
4231 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4232 gen_op_ld_T1_A0(ot + s->mem_index);
4233 } else if (op == OP_XORL && rm == reg) {
4234 goto xor_zero;
4235 } else {
4236 gen_op_mov_TN_reg(ot, 1, rm);
4237 }
4238 gen_op(s, op, ot, reg);
4239 break;
4240 case 2: /* OP A, Iv */
4241 val = insn_get(s, ot);
4242 gen_op_movl_T1_im(val);
4243 gen_op(s, op, ot, OR_EAX);
4244 break;
4245 }
4246 }
4247 break;
4249 case 0x82:
4250 if (CODE64(s))
4251 goto illegal_op;
4252 case 0x80: /* GRP1 */
4253 case 0x81:
4254 case 0x83:
4255 {
4256 int val;
4258 if ((b & 1) == 0)
4259 ot = OT_BYTE;
4260 else
4261 ot = dflag + OT_WORD;
4263 modrm = ldub_code(s->pc++);
4264 mod = (modrm >> 6) & 3;
4265 rm = (modrm & 7) | REX_B(s);
4266 op = (modrm >> 3) & 7;
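4267 /* GRP1: the modrm reg field selects the ALU op; 0x83 takes a sign extended 8 bit immediate */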
4268 if (mod != 3) {
4269 if (b == 0x83)
4270 s->rip_offset = 1;
4271 else
4272 s->rip_offset = insn_const_size(ot);
4273 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4274 opreg = OR_TMP0;
4275 } else {
4276 opreg = rm;
4277 }
4279 switch(b) {
4280 default:
4281 case 0x80:
4282 case 0x81:
4283 case 0x82:
4284 val = insn_get(s, ot);
4285 break;
4286 case 0x83:
4287 val = (int8_t)insn_get(s, OT_BYTE);
4288 break;
4289 }
4290 gen_op_movl_T1_im(val);
4291 gen_op(s, op, ot, opreg);
4292 }
4293 break;
4295 /**************************/
4296 /* inc, dec, and other misc arith */
4297 case 0x40 ... 0x47: /* inc Gv */
4298 ot = dflag ? OT_LONG : OT_WORD;
4299 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4300 break;
4301 case 0x48 ... 0x4f: /* dec Gv */
4302 ot = dflag ? OT_LONG : OT_WORD;
4303 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4304 break;
4305 case 0xf6: /* GRP3 */
4306 case 0xf7:
4307 if ((b & 1) == 0)
4308 ot = OT_BYTE;
4309 else
4310 ot = dflag + OT_WORD;
4312 modrm = ldub_code(s->pc++);
4313 mod = (modrm >> 6) & 3;
4314 rm = (modrm & 7) | REX_B(s);
4315 op = (modrm >> 3) & 7;
4316 if (mod != 3) {
4317 if (op == 0)
4318 s->rip_offset = insn_const_size(ot);
4319 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4320 gen_op_ld_T0_A0(ot + s->mem_index);
4321 } else {
4322 gen_op_mov_TN_reg(ot, 0, rm);
4323 }
4325 switch(op) {
4326 case 0: /* test */
4327 val = insn_get(s, ot);
4328 gen_op_movl_T1_im(val);
4329 gen_op_testl_T0_T1_cc();
4330 s->cc_op = CC_OP_LOGICB + ot;
4331 break;
4332 case 2: /* not */
4333 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4334 if (mod != 3) {
4335 gen_op_st_T0_A0(ot + s->mem_index);
4336 } else {
4337 gen_op_mov_reg_T0(ot, rm);
4338 }
4339 break;
4340 case 3: /* neg */
4341 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4342 if (mod != 3) {
4343 gen_op_st_T0_A0(ot + s->mem_index);
4344 } else {
4345 gen_op_mov_reg_T0(ot, rm);
4346 }
4347 gen_op_update_neg_cc();
4348 s->cc_op = CC_OP_SUBB + ot;
4349 break;
4350 case 4: /* mul */
4351 switch(ot) {
4352 case OT_BYTE:
4353 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4354 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4355 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4356 /* XXX: use 32 bit mul which could be faster */
4357 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4358 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4359 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4360 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4361 s->cc_op = CC_OP_MULB;
4362 break;
4363 case OT_WORD:
4364 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4365 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4366 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4367 /* XXX: use 32 bit mul which could be faster */
4368 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4369 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4370 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4371 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4372 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4373 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4374 s->cc_op = CC_OP_MULW;
4375 break;
4376 default:
4377 case OT_LONG:
4378 #ifdef TARGET_X86_64
4379 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4380 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4381 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4382 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4383 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4384 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4385 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4386 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4387 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4388 #else
4389 {
4390 TCGv_i64 t0, t1;
4391 t0 = tcg_temp_new_i64();
4392 t1 = tcg_temp_new_i64();
4393 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4394 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4395 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4396 tcg_gen_mul_i64(t0, t0, t1);
4397 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4398 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4399 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4400 tcg_gen_shri_i64(t0, t0, 32);
4401 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4402 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4403 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4404 }
4405 #endif
4406 s->cc_op = CC_OP_MULL;
4407 break;
4408 #ifdef TARGET_X86_64
4409 case OT_QUAD:
4410 gen_helper_mulq_EAX_T0(cpu_T[0]);
4411 s->cc_op = CC_OP_MULQ;
4412 break;
4413 #endif
4414 }
4415 break;
4416 case 5: /* imul */
4417 switch(ot) {
4418 case OT_BYTE:
4419 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4420 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4421 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4422 /* XXX: use 32 bit mul which could be faster */
4423 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4424 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4425 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4426 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4427 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4428 s->cc_op = CC_OP_MULB;
4429 break;
4430 case OT_WORD:
4431 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4432 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4433 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4434 /* XXX: use 32 bit mul which could be faster */
4435 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4436 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4437 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4438 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4439 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4440 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4441 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4442 s->cc_op = CC_OP_MULW;
4443 break;
4444 default:
4445 case OT_LONG:
4446 #ifdef TARGET_X86_64
4447 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4448 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4449 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4450 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4451 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4452 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4453 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4454 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4455 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4456 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4457 #else
4458 {
4459 TCGv_i64 t0, t1;
4460 t0 = tcg_temp_new_i64();
4461 t1 = tcg_temp_new_i64();
4462 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4463 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4464 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4465 tcg_gen_mul_i64(t0, t0, t1);
4466 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4467 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4468 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4469 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4470 tcg_gen_shri_i64(t0, t0, 32);
4471 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4472 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4473 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4474 }
4475 #endif
4476 s->cc_op = CC_OP_MULL;
4477 break;
4478 #ifdef TARGET_X86_64
4479 case OT_QUAD:
4480 gen_helper_imulq_EAX_T0(cpu_T[0]);
4481 s->cc_op = CC_OP_MULQ;
4482 break;
4483 #endif
4484 }
4485 break;
4486 case 6: /* div */
4487 switch(ot) {
4488 case OT_BYTE:
4489 gen_jmp_im(pc_start - s->cs_base);
4490 gen_helper_divb_AL(cpu_T[0]);
4491 break;
4492 case OT_WORD:
4493 gen_jmp_im(pc_start - s->cs_base);
4494 gen_helper_divw_AX(cpu_T[0]);
4495 break;
4496 default:
4497 case OT_LONG:
4498 gen_jmp_im(pc_start - s->cs_base);
4499 gen_helper_divl_EAX(cpu_T[0]);
4500 break;
4501 #ifdef TARGET_X86_64
4502 case OT_QUAD:
4503 gen_jmp_im(pc_start - s->cs_base);
4504 gen_helper_divq_EAX(cpu_T[0]);
4505 break;
4506 #endif
4507 }
4508 break;
4509 case 7: /* idiv */
4510 switch(ot) {
4511 case OT_BYTE:
4512 gen_jmp_im(pc_start - s->cs_base);
4513 gen_helper_idivb_AL(cpu_T[0]);
4514 break;
4515 case OT_WORD:
4516 gen_jmp_im(pc_start - s->cs_base);
4517 gen_helper_idivw_AX(cpu_T[0]);
4518 break;
4519 default:
4520 case OT_LONG:
4521 gen_jmp_im(pc_start - s->cs_base);
4522 gen_helper_idivl_EAX(cpu_T[0]);
4523 break;
4524 #ifdef TARGET_X86_64
4525 case OT_QUAD:
4526 gen_jmp_im(pc_start - s->cs_base);
4527 gen_helper_idivq_EAX(cpu_T[0]);
4528 break;
4529 #endif
4530 }
4531 break;
4532 default:
4533 goto illegal_op;
4534 }
4535 break;
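4536 /* GRP4 (0xfe) and GRP5 (0xff): the modrm reg field selects among inc/dec/call/jmp/push */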
4537 case 0xfe: /* GRP4 */
4538 case 0xff: /* GRP5 */
4539 if ((b & 1) == 0)
4540 ot = OT_BYTE;
4541 else
4542 ot = dflag + OT_WORD;
4544 modrm = ldub_code(s->pc++);
4545 mod = (modrm >> 6) & 3;
4546 rm = (modrm & 7) | REX_B(s);
4547 op = (modrm >> 3) & 7;
4548 if (op >= 2 && b == 0xfe) {
4549 goto illegal_op;
4550 }
4551 if (CODE64(s)) {
4552 if (op == 2 || op == 4) {
4553 /* operand size for jumps is 64 bit */
4554 ot = OT_QUAD;
4555 } else if (op == 3 || op == 5) {
4556 /* for calls, the operand is 16 or 32 bit, even
4557 in long mode */
4558 ot = dflag ? OT_LONG : OT_WORD;
4559 } else if (op == 6) {
4560 /* default push size is 64 bit */
4561 ot = dflag ? OT_QUAD : OT_WORD;
4562 }
4563 }
4564 if (mod != 3) {
4565 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4566 if (op >= 2 && op != 3 && op != 5)
4567 gen_op_ld_T0_A0(ot + s->mem_index);
4568 } else {
4569 gen_op_mov_TN_reg(ot, 0, rm);
4570 }
4572 switch(op) {
4573 case 0: /* inc Ev */
4574 if (mod != 3)
4575 opreg = OR_TMP0;
4576 else
4577 opreg = rm;
4578 gen_inc(s, ot, opreg, 1);
4579 break;
4580 case 1: /* dec Ev */
4581 if (mod != 3)
4582 opreg = OR_TMP0;
4583 else
4584 opreg = rm;
4585 gen_inc(s, ot, opreg, -1);
4586 break;
4587 case 2: /* call Ev */
4588 /* XXX: optimize the memory operand case (the 'and' below is not necessary there) */
4589 if (s->dflag == 0)
4590 gen_op_andl_T0_ffff();
4591 next_eip = s->pc - s->cs_base;
4592 gen_movtl_T1_im(next_eip);
4593 gen_push_T1(s);
4594 gen_op_jmp_T0();
4595 gen_eob(s);
4596 break;
4597 case 3: /* lcall Ev */
4598 gen_op_ld_T1_A0(ot + s->mem_index);
4599 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4600 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4601 do_lcall:
4602 if (s->pe && !s->vm86) {
4603 if (s->cc_op != CC_OP_DYNAMIC)
4604 gen_op_set_cc_op(s->cc_op);
4605 gen_jmp_im(pc_start - s->cs_base);
4606 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4607 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4608 tcg_const_i32(dflag),
4609 tcg_const_i32(s->pc - pc_start));
4610 } else {
4611 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4612 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4613 tcg_const_i32(dflag),
4614 tcg_const_i32(s->pc - s->cs_base));
4615 }
4616 gen_eob(s);
4617 break;
4618 case 4: /* jmp Ev */
4619 if (s->dflag == 0)
4620 gen_op_andl_T0_ffff();
4621 gen_op_jmp_T0();
4622 gen_eob(s);
4623 break;
4624 case 5: /* ljmp Ev */
4625 gen_op_ld_T1_A0(ot + s->mem_index);
4626 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4627 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4628 do_ljmp:
4629 if (s->pe && !s->vm86) {
4630 if (s->cc_op != CC_OP_DYNAMIC)
4631 gen_op_set_cc_op(s->cc_op);
4632 gen_jmp_im(pc_start - s->cs_base);
4633 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4634 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4635 tcg_const_i32(s->pc - pc_start));
4636 } else {
4637 gen_op_movl_seg_T0_vm(R_CS);
4638 gen_op_movl_T0_T1();
4639 gen_op_jmp_T0();
4640 }
4641 gen_eob(s);
4642 break;
4643 case 6: /* push Ev */
4644 gen_push_T0(s);
4645 break;
4646 default:
4647 goto illegal_op;
4648 }
4649 break;
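4650 /* test is an AND used only to set the flags; the result itself is discarded */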
4651 case 0x84: /* test Ev, Gv */
4652 case 0x85:
4653 if ((b & 1) == 0)
4654 ot = OT_BYTE;
4655 else
4656 ot = dflag + OT_WORD;
4658 modrm = ldub_code(s->pc++);
4659 mod = (modrm >> 6) & 3;
4660 rm = (modrm & 7) | REX_B(s);
4661 reg = ((modrm >> 3) & 7) | rex_r;
4663 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4664 gen_op_mov_TN_reg(ot, 1, reg);
4665 gen_op_testl_T0_T1_cc();
4666 s->cc_op = CC_OP_LOGICB + ot;
4667 break;
4669 case 0xa8: /* test eAX, Iv */
4670 case 0xa9:
4671 if ((b & 1) == 0)
4672 ot = OT_BYTE;
4673 else
4674 ot = dflag + OT_WORD;
4675 val = insn_get(s, ot);
4677 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4678 gen_op_movl_T1_im(val);
4679 gen_op_testl_T0_T1_cc();
4680 s->cc_op = CC_OP_LOGICB + ot;
4681 break;
4683 case 0x98: /* CWDE/CBW */
4684 #ifdef TARGET_X86_64
4685 if (dflag == 2) {
4686 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4687 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4688 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4689 } else
4690 #endif
4691 if (dflag == 1) {
4692 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4693 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4694 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4695 } else {
4696 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4697 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4698 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4699 }
4700 break;
4701 case 0x99: /* CDQ/CWD */
4702 #ifdef TARGET_X86_64
4703 if (dflag == 2) {
4704 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4705 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4706 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4707 } else
4708 #endif
4709 if (dflag == 1) {
4710 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4711 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4712 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4713 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4714 } else {
4715 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4716 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4717 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4718 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4719 }
4720 break;
4721 case 0x1af: /* imul Gv, Ev */
4722 case 0x69: /* imul Gv, Ev, I */
4723 case 0x6b:
4724 ot = dflag + OT_WORD;
4725 modrm = ldub_code(s->pc++);
4726 reg = ((modrm >> 3) & 7) | rex_r;
4727 if (b == 0x69)
4728 s->rip_offset = insn_const_size(ot);
4729 else if (b == 0x6b)
4730 s->rip_offset = 1;
4731 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4732 if (b == 0x69) {
4733 val = insn_get(s, ot);
4734 gen_op_movl_T1_im(val);
4735 } else if (b == 0x6b) {
4736 val = (int8_t)insn_get(s, OT_BYTE);
4737 gen_op_movl_T1_im(val);
4738 } else {
4739 gen_op_mov_TN_reg(ot, 1, reg);
4740 }
4742 #ifdef TARGET_X86_64
4743 if (ot == OT_QUAD) {
4744 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4745 } else
4746 #endif
4747 if (ot == OT_LONG) {
4748 #ifdef TARGET_X86_64
4749 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4750 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4751 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4752 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4753 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4754 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4755 #else
4756 {
4757 TCGv_i64 t0, t1;
4758 t0 = tcg_temp_new_i64();
4759 t1 = tcg_temp_new_i64();
4760 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4761 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4762 tcg_gen_mul_i64(t0, t0, t1);
4763 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4764 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4765 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4766 tcg_gen_shri_i64(t0, t0, 32);
4767 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4768 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4769 }
4770 #endif
4771 } else {
4772 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4773 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4774 /* XXX: use 32 bit mul which could be faster */
4775 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4776 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4777 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4778 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4779 }
4780 gen_op_mov_reg_T0(ot, reg);
4781 s->cc_op = CC_OP_MULB + ot;
4782 break;
4783 case 0x1c0:
4784 case 0x1c1: /* xadd Ev, Gv */
4785 if ((b & 1) == 0)
4786 ot = OT_BYTE;
4787 else
4788 ot = dflag + OT_WORD;
4789 modrm = ldub_code(s->pc++);
4790 reg = ((modrm >> 3) & 7) | rex_r;
4791 mod = (modrm >> 6) & 3;
4792 if (mod == 3) {
4793 rm = (modrm & 7) | REX_B(s);
4794 gen_op_mov_TN_reg(ot, 0, reg);
4795 gen_op_mov_TN_reg(ot, 1, rm);
4796 gen_op_addl_T0_T1();
4797 gen_op_mov_reg_T1(ot, reg);
4798 gen_op_mov_reg_T0(ot, rm);
4799 } else {
4800 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4801 gen_op_mov_TN_reg(ot, 0, reg);
4802 gen_op_ld_T1_A0(ot + s->mem_index);
4803 gen_op_addl_T0_T1();
4804 gen_op_st_T0_A0(ot + s->mem_index);
4805 gen_op_mov_reg_T1(ot, reg);
4806 }
4807 gen_op_update2_cc();
4808 s->cc_op = CC_OP_ADDB + ot;
4809 break;
4810 case 0x1b0:
4811 case 0x1b1: /* cmpxchg Ev, Gv */
4812 {
4813 int label1, label2;
4814 TCGv t0, t1, t2, a0;
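4815 /* t0 = current destination value, t1 = replacement value from reg, t2 = EAX - t0 for the flags; local temps so the values survive the branch below */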
4816 if ((b & 1) == 0)
4817 ot = OT_BYTE;
4818 else
4819 ot = dflag + OT_WORD;
4820 modrm = ldub_code(s->pc++);
4821 reg = ((modrm >> 3) & 7) | rex_r;
4822 mod = (modrm >> 6) & 3;
4823 t0 = tcg_temp_local_new();
4824 t1 = tcg_temp_local_new();
4825 t2 = tcg_temp_local_new();
4826 a0 = tcg_temp_local_new();
4827 gen_op_mov_v_reg(ot, t1, reg);
4828 if (mod == 3) {
4829 rm = (modrm & 7) | REX_B(s);
4830 gen_op_mov_v_reg(ot, t0, rm);
4831 } else {
4832 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4833 tcg_gen_mov_tl(a0, cpu_A0);
4834 gen_op_ld_v(ot + s->mem_index, t0, a0);
4835 rm = 0; /* avoid warning */
4836 }
4837 label1 = gen_new_label();
4838 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
4839 tcg_gen_sub_tl(t2, t2, t0);
4840 gen_extu(ot, t2);
4841 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4842 if (mod == 3) {
4843 label2 = gen_new_label();
4844 gen_op_mov_reg_v(ot, R_EAX, t0);
4845 tcg_gen_br(label2);
4846 gen_set_label(label1);
4847 gen_op_mov_reg_v(ot, rm, t1);
4848 gen_set_label(label2);
4849 } else {
4850 tcg_gen_mov_tl(t1, t0);
4851 gen_op_mov_reg_v(ot, R_EAX, t0);
4852 gen_set_label(label1);
4853 /* always store */
4854 gen_op_st_v(ot + s->mem_index, t1, a0);
4855 }
4856 tcg_gen_mov_tl(cpu_cc_src, t0);
4857 tcg_gen_mov_tl(cpu_cc_dst, t2);
4858 s->cc_op = CC_OP_SUBB + ot;
4859 tcg_temp_free(t0);
4860 tcg_temp_free(t1);
4861 tcg_temp_free(t2);
4862 tcg_temp_free(a0);
4863 }
4864 break;
4865 case 0x1c7: /* cmpxchg8b */
4866 modrm = ldub_code(s->pc++);
4867 mod = (modrm >> 6) & 3;
4868 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4869 goto illegal_op;
4870 #ifdef TARGET_X86_64
4871 if (dflag == 2) {
4872 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4873 goto illegal_op;
4874 gen_jmp_im(pc_start - s->cs_base);
4875 if (s->cc_op != CC_OP_DYNAMIC)
4876 gen_op_set_cc_op(s->cc_op);
4877 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4878 gen_helper_cmpxchg16b(cpu_A0);
4879 } else
4880 #endif
4881 {
4882 if (!(s->cpuid_features & CPUID_CX8))
4883 goto illegal_op;
4884 gen_jmp_im(pc_start - s->cs_base);
4885 if (s->cc_op != CC_OP_DYNAMIC)
4886 gen_op_set_cc_op(s->cc_op);
4887 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4888 gen_helper_cmpxchg8b(cpu_A0);
4889 }
4890 s->cc_op = CC_OP_EFLAGS;
4891 break;
4893 /**************************/
4894 /* push/pop */
4895 case 0x50 ... 0x57: /* push */
4896 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4897 gen_push_T0(s);
4898 break;
4899 case 0x58 ... 0x5f: /* pop */
4900 if (CODE64(s)) {
4901 ot = dflag ? OT_QUAD : OT_WORD;
4902 } else {
4903 ot = dflag + OT_WORD;
4904 }
4905 gen_pop_T0(s);
4906 /* NOTE: order is important for pop %sp */
4907 gen_pop_update(s);
4908 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4909 break;
4910 case 0x60: /* pusha */
4911 if (CODE64(s))
4912 goto illegal_op;
4913 gen_pusha(s);
4914 break;
4915 case 0x61: /* popa */
4916 if (CODE64(s))
4917 goto illegal_op;
4918 gen_popa(s);
4919 break;
4920 case 0x68: /* push Iv */
4921 case 0x6a:
4922 if (CODE64(s)) {
4923 ot = dflag ? OT_QUAD : OT_WORD;
4924 } else {
4925 ot = dflag + OT_WORD;
4926 }
4927 if (b == 0x68)
4928 val = insn_get(s, ot);
4929 else
4930 val = (int8_t)insn_get(s, OT_BYTE);
4931 gen_op_movl_T0_im(val);
4932 gen_push_T0(s);
4933 break;
4934 case 0x8f: /* pop Ev */
4935 if (CODE64(s)) {
4936 ot = dflag ? OT_QUAD : OT_WORD;
4937 } else {
4938 ot = dflag + OT_WORD;
4939 }
4940 modrm = ldub_code(s->pc++);
4941 mod = (modrm >> 6) & 3;
4942 gen_pop_T0(s);
4943 if (mod == 3) {
4944 /* NOTE: order is important for pop %sp */
4945 gen_pop_update(s);
4946 rm = (modrm & 7) | REX_B(s);
4947 gen_op_mov_reg_T0(ot, rm);
4948 } else {
4949 /* NOTE: order is important too for MMU exceptions */
4950 s->popl_esp_hack = 1 << ot;
4951 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4952 s->popl_esp_hack = 0;
4953 gen_pop_update(s);
4954 }
4955 break;
4956 case 0xc8: /* enter */
4957 {
4958 int level;
4959 val = lduw_code(s->pc);
4960 s->pc += 2;
4961 level = ldub_code(s->pc++);
4962 gen_enter(s, val, level);
4963 }
4964 break;
4965 case 0xc9: /* leave */
4966 /* XXX: exception not precise (ESP is updated before potential exception) */
4967 if (CODE64(s)) {
4968 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
4969 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
4970 } else if (s->ss32) {
4971 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
4972 gen_op_mov_reg_T0(OT_LONG, R_ESP);
4973 } else {
4974 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
4975 gen_op_mov_reg_T0(OT_WORD, R_ESP);
4976 }
4977 gen_pop_T0(s);
4978 if (CODE64(s)) {
4979 ot = dflag ? OT_QUAD : OT_WORD;
4980 } else {
4981 ot = dflag + OT_WORD;
4982 }
4983 gen_op_mov_reg_T0(ot, R_EBP);
4984 gen_pop_update(s);
4985 break;
4986 case 0x06: /* push es */
4987 case 0x0e: /* push cs */
4988 case 0x16: /* push ss */
4989 case 0x1e: /* push ds */
4990 if (CODE64(s))
4991 goto illegal_op;
4992 gen_op_movl_T0_seg(b >> 3);
4993 gen_push_T0(s);
4994 break;
4995 case 0x1a0: /* push fs */
4996 case 0x1a8: /* push gs */
4997 gen_op_movl_T0_seg((b >> 3) & 7);
4998 gen_push_T0(s);
4999 break;
5000 case 0x07: /* pop es */
5001 case 0x17: /* pop ss */
5002 case 0x1f: /* pop ds */
5003 if (CODE64(s))
5004 goto illegal_op;
5005 reg = b >> 3;
5006 gen_pop_T0(s);
5007 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5008 gen_pop_update(s);
5009 if (reg == R_SS) {
5010 /* if reg == SS, inhibit interrupts/trace. */
5011 /* If several instructions disable interrupts, only the
5012 _first_ does it */
5013 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5014 gen_helper_set_inhibit_irq();
5015 s->tf = 0;
5016 }
5017 if (s->is_jmp) {
5018 gen_jmp_im(s->pc - s->cs_base);
5019 gen_eob(s);
5020 }
5021 break;
5022 case 0x1a1: /* pop fs */
5023 case 0x1a9: /* pop gs */
5024 gen_pop_T0(s);
5025 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
5026 gen_pop_update(s);
5027 if (s->is_jmp) {
5028 gen_jmp_im(s->pc - s->cs_base);
5029 gen_eob(s);
5030 }
5031 break;
5033 /**************************/
5034 /* mov */
5035 case 0x88:
5036 case 0x89: /* mov Gv, Ev */
5037 if ((b & 1) == 0)
5038 ot = OT_BYTE;
5039 else
5040 ot = dflag + OT_WORD;
5041 modrm = ldub_code(s->pc++);
5042 reg = ((modrm >> 3) & 7) | rex_r;
5044 /* generate a generic store */
5045 gen_ldst_modrm(s, modrm, ot, reg, 1);
5046 break;
5047 case 0xc6:
5048 case 0xc7: /* mov Ev, Iv */
5049 if ((b & 1) == 0)
5050 ot = OT_BYTE;
5051 else
5052 ot = dflag + OT_WORD;
5053 modrm = ldub_code(s->pc++);
5054 mod = (modrm >> 6) & 3;
5055 if (mod != 3) {
5056 s->rip_offset = insn_const_size(ot);
5057 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5058 }
5059 val = insn_get(s, ot);
5060 gen_op_movl_T0_im(val);
5061 if (mod != 3)
5062 gen_op_st_T0_A0(ot + s->mem_index);
5063 else
5064 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
5065 break;
5066 case 0x8a:
5067 case 0x8b: /* mov Ev, Gv */
5068 if ((b & 1) == 0)
5069 ot = OT_BYTE;
5070 else
5071 ot = OT_WORD + dflag;
5072 modrm = ldub_code(s->pc++);
5073 reg = ((modrm >> 3) & 7) | rex_r;
5075 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
5076 gen_op_mov_reg_T0(ot, reg);
5077 break;
5078 case 0x8e: /* mov seg, Gv */
5079 modrm = ldub_code(s->pc++);
5080 reg = (modrm >> 3) & 7;
5081 if (reg >= 6 || reg == R_CS)
5082 goto illegal_op;
5083 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5084 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5085 if (reg == R_SS) {
5086 /* if reg == SS, inhibit interrupts/trace */
5087 /* If several instructions disable interrupts, only the
5088 _first_ does it */
5089 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5090 gen_helper_set_inhibit_irq();
5091 s->tf = 0;
5092 }
5093 if (s->is_jmp) {
5094 gen_jmp_im(s->pc - s->cs_base);
5095 gen_eob(s);
5096 }
5097 break;
5098 case 0x8c: /* mov Gv, seg */
5099 modrm = ldub_code(s->pc++);
5100 reg = (modrm >> 3) & 7;
5101 mod = (modrm >> 6) & 3;
5102 if (reg >= 6)
5103 goto illegal_op;
5104 gen_op_movl_T0_seg(reg);
5105 if (mod == 3)
5106 ot = OT_WORD + dflag;
5107 else
5108 ot = OT_WORD;
5109 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5110 break;
5112 case 0x1b6: /* movzbS Gv, Eb */
5113 case 0x1b7: /* movzwS Gv, Eb */
5114 case 0x1be: /* movsbS Gv, Eb */
5115 case 0x1bf: /* movswS Gv, Eb */
5116 {
5117 int d_ot;
5118 /* d_ot is the size of the destination */
5119 d_ot = dflag + OT_WORD;
5120 /* ot is the size of the source */
5121 ot = (b & 1) + OT_BYTE;
5122 modrm = ldub_code(s->pc++);
5123 reg = ((modrm >> 3) & 7) | rex_r;
5124 mod = (modrm >> 6) & 3;
5125 rm = (modrm & 7) | REX_B(s);
5127 if (mod == 3) {
5128 gen_op_mov_TN_reg(ot, 0, rm);
5129 switch(ot | (b & 8)) {
5130 case OT_BYTE:
5131 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5132 break;
5133 case OT_BYTE | 8:
5134 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5135 break;
5136 case OT_WORD:
5137 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5138 break;
5139 default:
5140 case OT_WORD | 8:
5141 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5142 break;
5143 }
5144 gen_op_mov_reg_T0(d_ot, reg);
5145 } else {
5146 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5147 if (b & 8) {
5148 gen_op_lds_T0_A0(ot + s->mem_index);
5149 } else {
5150 gen_op_ldu_T0_A0(ot + s->mem_index);
5152 gen_op_mov_reg_T0(d_ot, reg);
5151 }
5155 break;
5157 case 0x8d: /* lea */
5158 ot = dflag + OT_WORD;
5159 modrm = ldub_code(s->pc++);
5160 mod = (modrm >> 6) & 3;
5161 if (mod == 3)
5162 goto illegal_op;
5163 reg = ((modrm >> 3) & 7) | rex_r;
5164 /* we must ensure that no segment is added */
5165 s->override = -1;
5166 val = s->addseg;
5167 s->addseg = 0;
5168 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5169 s->addseg = val;
5170 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5171 break;
5173 case 0xa0: /* mov EAX, Ov */
5174 case 0xa1:
5175 case 0xa2: /* mov Ov, EAX */
5176 case 0xa3:
5177 {
5178 target_ulong offset_addr;
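5179 /* the 0xa0..0xa3 moffs forms encode the address as an inline 16/32/64 bit offset, with no modrm byte */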
5180 if ((b & 1) == 0)
5181 ot = OT_BYTE;
5182 else
5183 ot = dflag + OT_WORD;
5184 #ifdef TARGET_X86_64
5185 if (s->aflag == 2) {
5186 offset_addr = ldq_code(s->pc);
5187 s->pc += 8;
5188 gen_op_movq_A0_im(offset_addr);
5189 } else
5190 #endif
5191 {
5192 if (s->aflag) {
5193 offset_addr = insn_get(s, OT_LONG);
5194 } else {
5195 offset_addr = insn_get(s, OT_WORD);
5196 }
5197 gen_op_movl_A0_im(offset_addr);
5198 }
5199 gen_add_A0_ds_seg(s);
5200 if ((b & 2) == 0) {
5201 gen_op_ld_T0_A0(ot + s->mem_index);
5202 gen_op_mov_reg_T0(ot, R_EAX);
5203 } else {
5204 gen_op_mov_TN_reg(ot, 0, R_EAX);
5205 gen_op_st_T0_A0(ot + s->mem_index);
5206 }
5207 }
5208 break;
5209 case 0xd7: /* xlat */
5210 #ifdef TARGET_X86_64
5211 if (s->aflag == 2) {
5212 gen_op_movq_A0_reg(R_EBX);
5213 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5214 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5215 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5216 } else
5217 #endif
5218 {
5219 gen_op_movl_A0_reg(R_EBX);
5220 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5221 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5222 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5223 if (s->aflag == 0)
5224 gen_op_andl_A0_ffff();
5225 else
5226 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5227 }
5228 gen_add_A0_ds_seg(s);
5229 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5230 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5231 break;
5232 case 0xb0 ... 0xb7: /* mov R, Ib */
5233 val = insn_get(s, OT_BYTE);
5234 gen_op_movl_T0_im(val);
5235 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5236 break;
5237 case 0xb8 ... 0xbf: /* mov R, Iv */
5238 #ifdef TARGET_X86_64
5239 if (dflag == 2) {
5240 uint64_t tmp;
5241 /* 64 bit case */
5242 tmp = ldq_code(s->pc);
5243 s->pc += 8;
5244 reg = (b & 7) | REX_B(s);
5245 gen_movtl_T0_im(tmp);
5246 gen_op_mov_reg_T0(OT_QUAD, reg);
5247 } else
5248 #endif
5249 {
5250 ot = dflag ? OT_LONG : OT_WORD;
5251 val = insn_get(s, ot);
5252 reg = (b & 7) | REX_B(s);
5253 gen_op_movl_T0_im(val);
5254 gen_op_mov_reg_T0(ot, reg);
5255 }
5256 break;
5258 case 0x91 ... 0x97: /* xchg R, EAX */
5259 ot = dflag + OT_WORD;
5260 reg = (b & 7) | REX_B(s);
5261 rm = R_EAX;
5262 goto do_xchg_reg;
5263 case 0x86:
5264 case 0x87: /* xchg Ev, Gv */
5265 if ((b & 1) == 0)
5266 ot = OT_BYTE;
5267 else
5268 ot = dflag + OT_WORD;
5269 modrm = ldub_code(s->pc++);
5270 reg = ((modrm >> 3) & 7) | rex_r;
5271 mod = (modrm >> 6) & 3;
5272 if (mod == 3) {
5273 rm = (modrm & 7) | REX_B(s);
5274 do_xchg_reg:
5275 gen_op_mov_TN_reg(ot, 0, reg);
5276 gen_op_mov_TN_reg(ot, 1, rm);
5277 gen_op_mov_reg_T0(ot, rm);
5278 gen_op_mov_reg_T1(ot, reg);
5279 } else {
5280 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5281 gen_op_mov_TN_reg(ot, 0, reg);
5282 /* for xchg, lock is implicit */
5283 if (!(prefixes & PREFIX_LOCK))
5284 gen_helper_lock();
5285 gen_op_ld_T1_A0(ot + s->mem_index);
5286 gen_op_st_T0_A0(ot + s->mem_index);
5287 if (!(prefixes & PREFIX_LOCK))
5288 gen_helper_unlock();
5289 gen_op_mov_reg_T1(ot, reg);
5290 }
5291 break;
5292 case 0xc4: /* les Gv */
5293 if (CODE64(s))
5294 goto illegal_op;
5295 op = R_ES;
5296 goto do_lxx;
5297 case 0xc5: /* lds Gv */
5298 if (CODE64(s))
5299 goto illegal_op;
5300 op = R_DS;
5301 goto do_lxx;
5302 case 0x1b2: /* lss Gv */
5303 op = R_SS;
5304 goto do_lxx;
5305 case 0x1b4: /* lfs Gv */
5306 op = R_FS;
5307 goto do_lxx;
5308 case 0x1b5: /* lgs Gv */
5309 op = R_GS;
5310 do_lxx:
5311 ot = dflag ? OT_LONG : OT_WORD;
5312 modrm = ldub_code(s->pc++);
5313 reg = ((modrm >> 3) & 7) | rex_r;
5314 mod = (modrm >> 6) & 3;
5315 if (mod == 3)
5316 goto illegal_op;
5317 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5318 gen_op_ld_T1_A0(ot + s->mem_index);
5319 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5320 /* load the segment first to handle exceptions properly */
5321 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5322 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5323 /* then put the data */
5324 gen_op_mov_reg_T1(ot, reg);
5325 if (s->is_jmp) {
5326 gen_jmp_im(s->pc - s->cs_base);
5327 gen_eob(s);
5328 }
5329 break;
5331 /************************/
5332 /* shifts */
5333 case 0xc0:
5334 case 0xc1:
5335 /* shift Ev,Ib */
5336 shift = 2;
5337 grp2:
5338 {
5339 if ((b & 1) == 0)
5340 ot = OT_BYTE;
5341 else
5342 ot = dflag + OT_WORD;
5344 modrm = ldub_code(s->pc++);
5345 mod = (modrm >> 6) & 3;
5346 op = (modrm >> 3) & 7;
5348 if (mod != 3) {
5349 if (shift == 2) {
5350 s->rip_offset = 1;
5351 }
5352 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5353 opreg = OR_TMP0;
5354 } else {
5355 opreg = (modrm & 7) | REX_B(s);
5356 }
5358 /* emit the shift: the count comes from CL (shift == 0), is 1 (shift == 1), or is an immediate byte (shift == 2) */
5359 if (shift == 0) {
5360 gen_shift(s, op, ot, opreg, OR_ECX);
5361 } else {
5362 if (shift == 2) {
5363 shift = ldub_code(s->pc++);
5364 }
5365 gen_shifti(s, op, ot, opreg, shift);
5366 }
5367 }
5368 break;
5369 case 0xd0:
5370 case 0xd1:
5371 /* shift Ev,1 */
5372 shift = 1;
5373 goto grp2;
5374 case 0xd2:
5375 case 0xd3:
5376 /* shift Ev,cl */
5377 shift = 0;
5378 goto grp2;
5380 case 0x1a4: /* shld imm */
5381 op = 0;
5382 shift = 1;
5383 goto do_shiftd;
5384 case 0x1a5: /* shld cl */
5385 op = 0;
5386 shift = 0;
5387 goto do_shiftd;
5388 case 0x1ac: /* shrd imm */
5389 op = 1;
5390 shift = 1;
5391 goto do_shiftd;
5392 case 0x1ad: /* shrd cl */
5393 op = 1;
5394 shift = 0;
5395 do_shiftd:
5396 ot = dflag + OT_WORD;
5397 modrm = ldub_code(s->pc++);
5398 mod = (modrm >> 6) & 3;
5399 rm = (modrm & 7) | REX_B(s);
5400 reg = ((modrm >> 3) & 7) | rex_r;
5401 if (mod != 3) {
5402 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5403 opreg = OR_TMP0;
5404 } else {
5405 opreg = rm;
5406 }
5407 gen_op_mov_TN_reg(ot, 1, reg);
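5408 /* the double width shift count is kept in T3: an immediate byte, or CL loaded from the env */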
5409 if (shift) {
5410 val = ldub_code(s->pc++);
5411 tcg_gen_movi_tl(cpu_T3, val);
5412 } else {
5413 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
5414 }
5415 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5416 break;
5418 /************************/
5419 /* floats */
5420 case 0xd8 ... 0xdf:
5421 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5422 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5423 /* XXX: what to do if illegal op ? */
5424 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5425 break;
5426 }
5427 modrm = ldub_code(s->pc++);
5428 mod = (modrm >> 6) & 3;
5429 rm = modrm & 7;
5430 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
5431 if (mod != 3) {
5432 /* memory op */
5433 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5434 switch(op) {
5435 case 0x00 ... 0x07: /* fxxxs */
5436 case 0x10 ... 0x17: /* fixxxl */
5437 case 0x20 ... 0x27: /* fxxxl */
5438 case 0x30 ... 0x37: /* fixxx */
5439 {
5440 int op1;
5441 op1 = op & 7;
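5442 /* op >> 4 gives the memory operand type: 0 = float32, 1 = int32, 2 = float64, 3 = int16 */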
5443 switch(op >> 4) {
5444 case 0:
5445 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5446 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5447 gen_helper_flds_FT0(cpu_tmp2_i32);
5448 break;
5449 case 1:
5450 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5451 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5452 gen_helper_fildl_FT0(cpu_tmp2_i32);
5453 break;
5454 case 2:
5455 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5456 (s->mem_index >> 2) - 1);
5457 gen_helper_fldl_FT0(cpu_tmp1_i64);
5458 break;
5459 case 3:
5460 default:
5461 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5462 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5463 gen_helper_fildl_FT0(cpu_tmp2_i32);
5464 break;
5465 }
5467 gen_helper_fp_arith_ST0_FT0(op1);
5468 if (op1 == 3) {
5469 /* fcomp needs pop */
5470 gen_helper_fpop();
5471 }
5472 }
5473 break;
5474 case 0x08: /* flds */
5475 case 0x0a: /* fsts */
5476 case 0x0b: /* fstps */
5477 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5478 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5479 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5480 switch(op & 7) {
5481 case 0:
5482 switch(op >> 4) {
5483 case 0:
5484 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5485 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5486 gen_helper_flds_ST0(cpu_tmp2_i32);
5487 break;
5488 case 1:
5489 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5490 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5491 gen_helper_fildl_ST0(cpu_tmp2_i32);
5492 break;
5493 case 2:
5494 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5495 (s->mem_index >> 2) - 1);
5496 gen_helper_fldl_ST0(cpu_tmp1_i64);
5497 break;
5498 case 3:
5499 default:
5500 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5501 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5502 gen_helper_fildl_ST0(cpu_tmp2_i32);
5503 break;
5504 }
5505 break;
5506 case 1:
5507 /* XXX: the corresponding CPUID bit (SSE3, for fisttp) must be tested! */
5508 switch(op >> 4) {
5509 case 1:
5510 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5511 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5512 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5513 break;
5514 case 2:
5515 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5516 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5517 (s->mem_index >> 2) - 1);
5518 break;
5519 case 3:
5520 default:
5521 gen_helper_fistt_ST0(cpu_tmp2_i32);
5522 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5523 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5524 break;
5525 }
5526 gen_helper_fpop();
5527 break;
5528 default:
5529 switch(op >> 4) {
5530 case 0:
5531 gen_helper_fsts_ST0(cpu_tmp2_i32);
5532 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5533 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5534 break;
5535 case 1:
5536 gen_helper_fistl_ST0(cpu_tmp2_i32);
5537 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5538 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5539 break;
5540 case 2:
5541 gen_helper_fstl_ST0(cpu_tmp1_i64);
5542 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5543 (s->mem_index >> 2) - 1);
5544 break;
5545 case 3:
5546 default:
5547 gen_helper_fist_ST0(cpu_tmp2_i32);
5548 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5549 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5550 break;
5551 }
5552 if ((op & 7) == 3)
5553 gen_helper_fpop();
5554 break;
5555 }
5556 break;
5557 case 0x0c: /* fldenv mem */
5558 if (s->cc_op != CC_OP_DYNAMIC)
5559 gen_op_set_cc_op(s->cc_op);
5560 gen_jmp_im(pc_start - s->cs_base);
5561 gen_helper_fldenv(
5562 cpu_A0, tcg_const_i32(s->dflag));
5563 break;
5564 case 0x0d: /* fldcw mem */
5565 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5566 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5567 gen_helper_fldcw(cpu_tmp2_i32);
5568 break;
5569 case 0x0e: /* fnstenv mem */
5570 if (s->cc_op != CC_OP_DYNAMIC)
5571 gen_op_set_cc_op(s->cc_op);
5572 gen_jmp_im(pc_start - s->cs_base);
5573 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5574 break;
5575 case 0x0f: /* fnstcw mem */
5576 gen_helper_fnstcw(cpu_tmp2_i32);
5577 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5578 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5579 break;
5580 case 0x1d: /* fldt mem */
5581 if (s->cc_op != CC_OP_DYNAMIC)
5582 gen_op_set_cc_op(s->cc_op);
5583 gen_jmp_im(pc_start - s->cs_base);
5584 gen_helper_fldt_ST0(cpu_A0);
5585 break;
5586 case 0x1f: /* fstpt mem */
5587 if (s->cc_op != CC_OP_DYNAMIC)
5588 gen_op_set_cc_op(s->cc_op);
5589 gen_jmp_im(pc_start - s->cs_base);
5590 gen_helper_fstt_ST0(cpu_A0);
5591 gen_helper_fpop();
5592 break;
5593 case 0x2c: /* frstor mem */
5594 if (s->cc_op != CC_OP_DYNAMIC)
5595 gen_op_set_cc_op(s->cc_op);
5596 gen_jmp_im(pc_start - s->cs_base);
5597 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5598 break;
5599 case 0x2e: /* fnsave mem */
5600 if (s->cc_op != CC_OP_DYNAMIC)
5601 gen_op_set_cc_op(s->cc_op);
5602 gen_jmp_im(pc_start - s->cs_base);
5603 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5604 break;
5605 case 0x2f: /* fnstsw mem */
5606 gen_helper_fnstsw(cpu_tmp2_i32);
5607 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5608 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5609 break;
5610 case 0x3c: /* fbld */
5611 if (s->cc_op != CC_OP_DYNAMIC)
5612 gen_op_set_cc_op(s->cc_op);
5613 gen_jmp_im(pc_start - s->cs_base);
5614 gen_helper_fbld_ST0(cpu_A0);
5615 break;
5616 case 0x3e: /* fbstp */
5617 if (s->cc_op != CC_OP_DYNAMIC)
5618 gen_op_set_cc_op(s->cc_op);
5619 gen_jmp_im(pc_start - s->cs_base);
5620 gen_helper_fbst_ST0(cpu_A0);
5621 gen_helper_fpop();
5622 break;
5623 case 0x3d: /* fildll */
5624 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5625 (s->mem_index >> 2) - 1);
5626 gen_helper_fildll_ST0(cpu_tmp1_i64);
5627 break;
5628 case 0x3f: /* fistpll */
5629 gen_helper_fistll_ST0(cpu_tmp1_i64);
5630 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5631 (s->mem_index >> 2) - 1);
5632 gen_helper_fpop();
5633 break;
5634 default:
5635 goto illegal_op;
5636 }
5637 } else {
5638 /* register float ops */
5639 opreg = rm;
5641 switch(op) {
5642 case 0x08: /* fld sti */
5643 gen_helper_fpush();
5644 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5645 break;
5646 case 0x09: /* fxchg sti */
5647 case 0x29: /* fxchg4 sti, undocumented op */
5648 case 0x39: /* fxchg7 sti, undocumented op */
5649 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5650 break;
5651 case 0x0a: /* grp d9/2 */
5652 switch(rm) {
5653 case 0: /* fnop */
5654 /* check exceptions (FreeBSD FPU probe) */
5655 if (s->cc_op != CC_OP_DYNAMIC)
5656 gen_op_set_cc_op(s->cc_op);
5657 gen_jmp_im(pc_start - s->cs_base);
5658 gen_helper_fwait();
5659 break;
5660 default:
5661 goto illegal_op;
5662 }
5663 break;
5664 case 0x0c: /* grp d9/4 */
5665 switch(rm) {
5666 case 0: /* fchs */
5667 gen_helper_fchs_ST0();
5668 break;
5669 case 1: /* fabs */
5670 gen_helper_fabs_ST0();
5671 break;
5672 case 4: /* ftst */
5673 gen_helper_fldz_FT0();
5674 gen_helper_fcom_ST0_FT0();
5675 break;
5676 case 5: /* fxam */
5677 gen_helper_fxam_ST0();
5678 break;
5679 default:
5680 goto illegal_op;
5681 }
5682 break;
5683 case 0x0d: /* grp d9/5 */
5684 {
5685 switch(rm) {
5686 case 0:
5687 gen_helper_fpush();
5688 gen_helper_fld1_ST0();
5689 break;
5690 case 1:
5691 gen_helper_fpush();
5692 gen_helper_fldl2t_ST0();
5693 break;
5694 case 2:
5695 gen_helper_fpush();
5696 gen_helper_fldl2e_ST0();
5697 break;
5698 case 3:
5699 gen_helper_fpush();
5700 gen_helper_fldpi_ST0();
5701 break;
5702 case 4:
5703 gen_helper_fpush();
5704 gen_helper_fldlg2_ST0();
5705 break;
5706 case 5:
5707 gen_helper_fpush();
5708 gen_helper_fldln2_ST0();
5709 break;
5710 case 6:
5711 gen_helper_fpush();
5712 gen_helper_fldz_ST0();
5713 break;
5714 default:
5715 goto illegal_op;
5716 }
5717 }
5718 break;
5719 case 0x0e: /* grp d9/6 */
5720 switch(rm) {
5721 case 0: /* f2xm1 */
5722 gen_helper_f2xm1();
5723 break;
5724 case 1: /* fyl2x */
5725 gen_helper_fyl2x();
5726 break;
5727 case 2: /* fptan */
5728 gen_helper_fptan();
5729 break;
5730 case 3: /* fpatan */
5731 gen_helper_fpatan();
5732 break;
5733 case 4: /* fxtract */
5734 gen_helper_fxtract();
5735 break;
5736 case 5: /* fprem1 */
5737 gen_helper_fprem1();
5738 break;
5739 case 6: /* fdecstp */
5740 gen_helper_fdecstp();
5741 break;
5742 default:
5743 case 7: /* fincstp */
5744 gen_helper_fincstp();
5745 break;
5746 }
5747 break;
5748 case 0x0f: /* grp d9/7 */
5749 switch(rm) {
5750 case 0: /* fprem */
5751 gen_helper_fprem();
5752 break;
5753 case 1: /* fyl2xp1 */
5754 gen_helper_fyl2xp1();
5755 break;
5756 case 2: /* fsqrt */
5757 gen_helper_fsqrt();
5758 break;
5759 case 3: /* fsincos */
5760 gen_helper_fsincos();
5761 break;
5762 case 5: /* fscale */
5763 gen_helper_fscale();
5764 break;
5765 case 4: /* frndint */
5766 gen_helper_frndint();
5767 break;
5768 case 6: /* fsin */
5769 gen_helper_fsin();
5770 break;
5771 default:
5772 case 7: /* fcos */
5773 gen_helper_fcos();
5774 break;
5775 }
5776 break;
5777 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5778 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5779 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5780 {
5781 int op1;
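5782 /* for op >= 0x20 the destination is ST(i) rather than ST(0); the 0x30 forms also pop the stack */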
5783 op1 = op & 7;
5784 if (op >= 0x20) {
5785 gen_helper_fp_arith_STN_ST0(op1, opreg);
5786 if (op >= 0x30)
5787 gen_helper_fpop();
5788 } else {
5789 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5790 gen_helper_fp_arith_ST0_FT0(op1);
5791 }
5792 }
5793 break;
5794 case 0x02: /* fcom */
5795 case 0x22: /* fcom2, undocumented op */
5796 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5797 gen_helper_fcom_ST0_FT0();
5798 break;
5799 case 0x03: /* fcomp */
5800 case 0x23: /* fcomp3, undocumented op */
5801 case 0x32: /* fcomp5, undocumented op */
5802 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5803 gen_helper_fcom_ST0_FT0();
5804 gen_helper_fpop();
5805 break;
5806 case 0x15: /* da/5 */
5807 switch(rm) {
5808 case 1: /* fucompp */
5809 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5810 gen_helper_fucom_ST0_FT0();
5811 gen_helper_fpop();
5812 gen_helper_fpop();
5813 break;
5814 default:
5815 goto illegal_op;
5816 }
5817 break;
5818 case 0x1c:
5819 switch(rm) {
5820 case 0: /* feni (287 only, just do nop here) */
5821 break;
5822 case 1: /* fdisi (287 only, just do nop here) */
5823 break;
5824 case 2: /* fclex */
5825 gen_helper_fclex();
5826 break;
5827 case 3: /* fninit */
5828 gen_helper_fninit();
5829 break;
5830 case 4: /* fsetpm (287 only, just do nop here) */
5831 break;
5832 default:
5833 goto illegal_op;
5834 }
5835 break;
5836 case 0x1d: /* fucomi */
5837 if (s->cc_op != CC_OP_DYNAMIC)
5838 gen_op_set_cc_op(s->cc_op);
5839 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5840 gen_helper_fucomi_ST0_FT0();
5841 s->cc_op = CC_OP_EFLAGS;
5842 break;
5843 case 0x1e: /* fcomi */
5844 if (s->cc_op != CC_OP_DYNAMIC)
5845 gen_op_set_cc_op(s->cc_op);
5846 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5847 gen_helper_fcomi_ST0_FT0();
5848 s->cc_op = CC_OP_EFLAGS;
5849 break;
5850 case 0x28: /* ffree sti */
5851 gen_helper_ffree_STN(tcg_const_i32(opreg));
5852 break;
5853 case 0x2a: /* fst sti */
5854 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5855 break;
5856 case 0x2b: /* fstp sti */
5857 case 0x0b: /* fstp1 sti, undocumented op */
5858 case 0x3a: /* fstp8 sti, undocumented op */
5859 case 0x3b: /* fstp9 sti, undocumented op */
5860 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5861 gen_helper_fpop();
5862 break;
5863 case 0x2c: /* fucom st(i) */
5864 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5865 gen_helper_fucom_ST0_FT0();
5866 break;
5867 case 0x2d: /* fucomp st(i) */
5868 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5869 gen_helper_fucom_ST0_FT0();
5870 gen_helper_fpop();
5871 break;
5872 case 0x33: /* de/3 */
5873 switch(rm) {
5874 case 1: /* fcompp */
5875 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5876 gen_helper_fcom_ST0_FT0();
5877 gen_helper_fpop();
5878 gen_helper_fpop();
5879 break;
5880 default:
5881 goto illegal_op;
5882 }
5883 break;
5884 case 0x38: /* ffreep sti, undocumented op */
5885 gen_helper_ffree_STN(tcg_const_i32(opreg));
5886 gen_helper_fpop();
5887 break;
5888 case 0x3c: /* df/4 */
5889 switch(rm) {
5890 case 0:
5891 gen_helper_fnstsw(cpu_tmp2_i32);
5892 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5893 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5894 break;
5895 default:
5896 goto illegal_op;
5897 }
5898 break;
5899 case 0x3d: /* fucomip */
5900 if (s->cc_op != CC_OP_DYNAMIC)
5901 gen_op_set_cc_op(s->cc_op);
5902 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5903 gen_helper_fucomi_ST0_FT0();
5904 gen_helper_fpop();
5905 s->cc_op = CC_OP_EFLAGS;
5906 break;
5907 case 0x3e: /* fcomip */
5908 if (s->cc_op != CC_OP_DYNAMIC)
5909 gen_op_set_cc_op(s->cc_op);
5910 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5911 gen_helper_fcomi_ST0_FT0();
5912 gen_helper_fpop();
5913 s->cc_op = CC_OP_EFLAGS;
5914 break;
5915 case 0x10 ... 0x13: /* fcmovxx */
5916 case 0x18 ... 0x1b:
5917 {
5918 int op1, l1;
5919 static const uint8_t fcmov_cc[8] = {
5920 (JCC_B << 1),
5921 (JCC_Z << 1),
5922 (JCC_BE << 1),
5923 (JCC_P << 1),
5924 };
5925 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
5926 l1 = gen_new_label();
5927 gen_jcc1(s, s->cc_op, op1, l1);
5928 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
5929 gen_set_label(l1);
5930 }
5931 break;
5932 default:
5933 goto illegal_op;
5934 }
5935 }
5936 break;
5937 /************************/
5938 /* string ops */
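5939 /* with a REP/REPNZ prefix the gen_repz_* helpers emit the repeated form; otherwise a single iteration is generated */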
5940 case 0xa4: /* movsS */
5941 case 0xa5:
5942 if ((b & 1) == 0)
5943 ot = OT_BYTE;
5944 else
5945 ot = dflag + OT_WORD;
5947 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5948 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5949 } else {
5950 gen_movs(s, ot);
5951 }
5952 break;
5954 case 0xaa: /* stosS */
5955 case 0xab:
5956 if ((b & 1) == 0)
5957 ot = OT_BYTE;
5958 else
5959 ot = dflag + OT_WORD;
5961 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5962 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5963 } else {
5964 gen_stos(s, ot);
5965 }
5966 break;
5967 case 0xac: /* lodsS */
5968 case 0xad:
5969 if ((b & 1) == 0)
5970 ot = OT_BYTE;
5971 else
5972 ot = dflag + OT_WORD;
5973 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5974 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5975 } else {
5976 gen_lods(s, ot);
5977 }
5978 break;
5979 case 0xae: /* scasS */
5980 case 0xaf:
5981 if ((b & 1) == 0)
5982 ot = OT_BYTE;
5983 else
5984 ot = dflag + OT_WORD;
5985 if (prefixes & PREFIX_REPNZ) {
5986 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5987 } else if (prefixes & PREFIX_REPZ) {
5988 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5989 } else {
5990 gen_scas(s, ot);
5991 s->cc_op = CC_OP_SUBB + ot;
5992 }
5993 break;
5995 case 0xa6: /* cmpsS */
5996 case 0xa7:
5997 if ((b & 1) == 0)
5998 ot = OT_BYTE;
5999 else
6000 ot = dflag + OT_WORD;
6001 if (prefixes & PREFIX_REPNZ) {
6002 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
6003 } else if (prefixes & PREFIX_REPZ) {
6004 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
6005 } else {
6006 gen_cmps(s, ot);
6007 s->cc_op = CC_OP_SUBB + ot;
6008 }
6009 break;
6010 case 0x6c: /* insS */
6011 case 0x6d:
6012 if ((b & 1) == 0)
6013 ot = OT_BYTE;
6014 else
6015 ot = dflag ? OT_LONG : OT_WORD;
6016 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6017 gen_op_andl_T0_ffff();
6018 gen_check_io(s, ot, pc_start - s->cs_base,
6019 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
6020 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6021 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6022 } else {
6023 gen_ins(s, ot);
6024 if (use_icount) {
6025 gen_jmp(s, s->pc - s->cs_base);
6026 }
6027 }
6028 break;
6029 case 0x6e: /* outsS */
6030 case 0x6f:
6031 if ((b & 1) == 0)
6032 ot = OT_BYTE;
6033 else
6034 ot = dflag ? OT_LONG : OT_WORD;
6035 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6036 gen_op_andl_T0_ffff();
6037 gen_check_io(s, ot, pc_start - s->cs_base,
6038 svm_is_rep(prefixes) | 4);
6039 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
6040 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
6041 } else {
6042 gen_outs(s, ot);
6043 if (use_icount) {
6044 gen_jmp(s, s->pc - s->cs_base);
6045 }
6046 }
6047 break;
6049 /************************/
6050 /* port I/O */
6052 case 0xe4:
6053 case 0xe5:
6054 if ((b & 1) == 0)
6055 ot = OT_BYTE;
6056 else
6057 ot = dflag ? OT_LONG : OT_WORD;
6058 val = ldub_code(s->pc++);
6059 gen_op_movl_T0_im(val);
6060 gen_check_io(s, ot, pc_start - s->cs_base,
6061 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6062 if (use_icount)
6063 gen_io_start();
6064 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6065 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6066 gen_op_mov_reg_T1(ot, R_EAX);
6067 if (use_icount) {
6068 gen_io_end();
6069 gen_jmp(s, s->pc - s->cs_base);
6070 }
6071 break;
6072 case 0xe6:
6073 case 0xe7:
6074 if ((b & 1) == 0)
6075 ot = OT_BYTE;
6076 else
6077 ot = dflag ? OT_LONG : OT_WORD;
6078 val = ldub_code(s->pc++);
6079 gen_op_movl_T0_im(val);
6080 gen_check_io(s, ot, pc_start - s->cs_base,
6081 svm_is_rep(prefixes));
6082 gen_op_mov_TN_reg(ot, 1, R_EAX);
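6083 /* T0 holds the port number (masked to 16 bits before the call), T1 the value taken from eAX */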
6084 if (use_icount)
6085 gen_io_start();
6086 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6087 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6088 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6089 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6090 if (use_icount) {
6091 gen_io_end();
6092 gen_jmp(s, s->pc - s->cs_base);
6093 }
6094 break;
6095 case 0xec:
6096 case 0xed:
6097 if ((b & 1) == 0)
6098 ot = OT_BYTE;
6099 else
6100 ot = dflag ? OT_LONG : OT_WORD;
6101 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6102 gen_op_andl_T0_ffff();
6103 gen_check_io(s, ot, pc_start - s->cs_base,
6104 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6105 if (use_icount)
6106 gen_io_start();
6107 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6108 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6109 gen_op_mov_reg_T1(ot, R_EAX);
6110 if (use_icount) {
6111 gen_io_end();
6112 gen_jmp(s, s->pc - s->cs_base);
6113 }
6114 break;
6115 case 0xee:
6116 case 0xef:
6117 if ((b & 1) == 0)
6118 ot = OT_BYTE;
6119 else
6120 ot = dflag ? OT_LONG : OT_WORD;
6121 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6122 gen_op_andl_T0_ffff();
6123 gen_check_io(s, ot, pc_start - s->cs_base,
6124 svm_is_rep(prefixes));
6125 gen_op_mov_TN_reg(ot, 1, R_EAX);
6127 if (use_icount)
6128 gen_io_start();
6129 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6130 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6131 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6132 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6133 if (use_icount) {
6134 gen_io_end();
6135 gen_jmp(s, s->pc - s->cs_base);
6136 }
6137 break;
6139 /************************/
6140 /* control */
6141 case 0xc2: /* ret im */
6142 val = ldsw_code(s->pc);
6143 s->pc += 2;
6144 gen_pop_T0(s);
6145 if (CODE64(s) && s->dflag)
6146 s->dflag = 2;
6147 gen_stack_update(s, val + (2 << s->dflag));
6148 if (s->dflag == 0)
6149 gen_op_andl_T0_ffff();
6150 gen_op_jmp_T0();
6151 gen_eob(s);
6152 break;
6153 case 0xc3: /* ret */
6154 gen_pop_T0(s);
6155 gen_pop_update(s);
6156 if (s->dflag == 0)
6157 gen_op_andl_T0_ffff();
6158 gen_op_jmp_T0();
6159 gen_eob(s);
6160 break;
6161 case 0xca: /* lret im */
6162 val = ldsw_code(s->pc);
6163 s->pc += 2;
6164 do_lret:
6165 if (s->pe && !s->vm86) {
6166 if (s->cc_op != CC_OP_DYNAMIC)
6167 gen_op_set_cc_op(s->cc_op);
6168 gen_jmp_im(pc_start - s->cs_base);
6169 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6170 tcg_const_i32(val));
6171 } else {
6172 gen_stack_A0(s);
6173 /* pop offset */
6174 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6175 if (s->dflag == 0)
6176 gen_op_andl_T0_ffff();
6177 /* NOTE: keeping EIP updated is not a problem in case of
6178 exception */
6179 gen_op_jmp_T0();
6180 /* pop selector */
6181 gen_op_addl_A0_im(2 << s->dflag);
6182 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6183 gen_op_movl_seg_T0_vm(R_CS);
6184 /* add stack offset */
6185 gen_stack_update(s, val + (4 << s->dflag));
6186 }
6187 gen_eob(s);
6188 break;
6189 case 0xcb: /* lret */
6190 val = 0;
6191 goto do_lret;
6192 case 0xcf: /* iret */
6193 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6194 if (!s->pe) {
6195 /* real mode */
6196 gen_helper_iret_real(tcg_const_i32(s->dflag));
6197 s->cc_op = CC_OP_EFLAGS;
6198 } else if (s->vm86) {
6199 if (s->iopl != 3) {
6200 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6201 } else {
6202 gen_helper_iret_real(tcg_const_i32(s->dflag));
6203 s->cc_op = CC_OP_EFLAGS;
6204 }
6205 } else {
6206 if (s->cc_op != CC_OP_DYNAMIC)
6207 gen_op_set_cc_op(s->cc_op);
6208 gen_jmp_im(pc_start - s->cs_base);
6209 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6210 tcg_const_i32(s->pc - s->cs_base));
6211 s->cc_op = CC_OP_EFLAGS;
6212 }
6213 gen_eob(s);
6214 break;
6215 case 0xe8: /* call im */
6216 {
6217 if (dflag)
6218 tval = (int32_t)insn_get(s, OT_LONG);
6219 else
6220 tval = (int16_t)insn_get(s, OT_WORD);
6221 next_eip = s->pc - s->cs_base;
6222 tval += next_eip;
6223 if (s->dflag == 0)
6224 tval &= 0xffff;
6225 gen_movtl_T0_im(next_eip);
6226 gen_push_T0(s);
6227 gen_jmp(s, tval);
6228 }
6229 break;
6230 case 0x9a: /* lcall im */
6231 {
6232 unsigned int selector, offset;
6234 if (CODE64(s))
6235 goto illegal_op;
6236 ot = dflag ? OT_LONG : OT_WORD;
6237 offset = insn_get(s, ot);
6238 selector = insn_get(s, OT_WORD);
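6239 /* T0 = new CS selector, T1 = target offset; control then joins the common do_lcall dispatch above */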
6240 gen_op_movl_T0_im(selector);
6241 gen_op_movl_T1_imu(offset);
6242 }
6243 goto do_lcall;
6244 case 0xe9: /* jmp im */
6245 if (dflag)
6246 tval = (int32_t)insn_get(s, OT_LONG);
6247 else
6248 tval = (int16_t)insn_get(s, OT_WORD);
6249 tval += s->pc - s->cs_base;
6250 if (s->dflag == 0)
6251 tval &= 0xffff;
6252 else if(!CODE64(s))
6253 tval &= 0xffffffff;
6254 gen_jmp(s, tval);
6255 break;
6256 case 0xea: /* ljmp im */
6257 {
6258 unsigned int selector, offset;
6260 if (CODE64(s))
6261 goto illegal_op;
6262 ot = dflag ? OT_LONG : OT_WORD;
6263 offset = insn_get(s, ot);
6264 selector = insn_get(s, OT_WORD);
6266 gen_op_movl_T0_im(selector);
6267 gen_op_movl_T1_imu(offset);
6268 }
6269 goto do_ljmp;
6270 case 0xeb: /* jmp Jb */
6271 tval = (int8_t)insn_get(s, OT_BYTE);
6272 tval += s->pc - s->cs_base;
6273 if (s->dflag == 0)
6274 tval &= 0xffff;
6275 gen_jmp(s, tval);
6276 break;
6277 case 0x70 ... 0x7f: /* jcc Jb */
6278 tval = (int8_t)insn_get(s, OT_BYTE);
6279 goto do_jcc;
6280 case 0x180 ... 0x18f: /* jcc Jv */
6281 if (dflag) {
6282 tval = (int32_t)insn_get(s, OT_LONG);
6283 } else {
6284 tval = (int16_t)insn_get(s, OT_WORD);
6285 }
6286 do_jcc:
6287 next_eip = s->pc - s->cs_base;
6288 tval += next_eip;
6289 if (s->dflag == 0)
6290 tval &= 0xffff;
6291 gen_jcc(s, b, tval, next_eip);
6292 break;
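6293 /* gen_setcc apparently leaves 0 or 1 in T0, which the generic modrm store writes to the byte destination */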
6294 case 0x190 ... 0x19f: /* setcc Gv */
6295 modrm = ldub_code(s->pc++);
6296 gen_setcc(s, b);
6297 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6298 break;
6299 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6300 {
6301 int l1;
6302 TCGv t0;
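6303 /* cmov: the source operand is always read (matching hardware, where the load is unconditional), only the register write back is conditional */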
6304 ot = dflag + OT_WORD;
6305 modrm = ldub_code(s->pc++);
6306 reg = ((modrm >> 3) & 7) | rex_r;
6307 mod = (modrm >> 6) & 3;
6308 t0 = tcg_temp_local_new();
6309 if (mod != 3) {
6310 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6311 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6312 } else {
6313 rm = (modrm & 7) | REX_B(s);
6314 gen_op_mov_v_reg(ot, t0, rm);
6315 }
6316 #ifdef TARGET_X86_64
6317 if (ot == OT_LONG) {
6318 /* XXX: Intel-specific behaviour? */
6319 l1 = gen_new_label();
6320 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6321 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
6322 gen_set_label(l1);
6323 tcg_gen_movi_tl(cpu_tmp0, 0);
6324 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
6325 } else
6326 #endif
6327 {
6328 l1 = gen_new_label();
6329 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6330 gen_op_mov_reg_v(ot, reg, t0);
6331 gen_set_label(l1);
6332 }
6333 tcg_temp_free(t0);
6334 }
6335 break;
6337 /************************/
6338 /* flags */
6339 case 0x9c: /* pushf */
6340 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6341 if (s->vm86 && s->iopl != 3) {
6342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6343 } else {
6344 if (s->cc_op != CC_OP_DYNAMIC)
6345 gen_op_set_cc_op(s->cc_op);
6346 gen_helper_read_eflags(cpu_T[0]);
6347 gen_push_T0(s);
6348 }
6349 break;
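/* the popf writable-bit masks below implement the x86 privilege
   rules: at CPL 0 all of TF/AC/ID/NT/IF/IOPL may change; at
   CPL <= IOPL everything but IOPL; otherwise neither IF nor IOPL.
   For example, a CPL-3 program running under IOPL 0 cannot clear
   IF with popf. A 16-bit operand size additionally restricts the
   mask to the low word of EFLAGS. */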
6350 case 0x9d: /* popf */
6351 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6352 if (s->vm86 && s->iopl != 3) {
6353 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6354 } else {
6355 gen_pop_T0(s);
6356 if (s->cpl == 0) {
6357 if (s->dflag) {
6358 gen_helper_write_eflags(cpu_T[0],
6359 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6360 } else {
6361 gen_helper_write_eflags(cpu_T[0],
6362 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6363 }
6364 } else {
6365 if (s->cpl <= s->iopl) {
6366 if (s->dflag) {
6367 gen_helper_write_eflags(cpu_T[0],
6368 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6369 } else {
6370 gen_helper_write_eflags(cpu_T[0],
6371 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6372 }
6373 } else {
6374 if (s->dflag) {
6375 gen_helper_write_eflags(cpu_T[0],
6376 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6377 } else {
6378 gen_helper_write_eflags(cpu_T[0],
6379 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6380 }
6381 }
6382 }
6383 gen_pop_update(s);
6384 s->cc_op = CC_OP_EFLAGS;
6385 /* abort translation because TF flag may change */
6386 gen_jmp_im(s->pc - s->cs_base);
6387 gen_eob(s);
6388 }
6389 break;
6390 case 0x9e: /* sahf */
6391 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6392 goto illegal_op;
6393 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6394 if (s->cc_op != CC_OP_DYNAMIC)
6395 gen_op_set_cc_op(s->cc_op);
6396 gen_compute_eflags(cpu_cc_src);
6397 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6398 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6399 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6400 s->cc_op = CC_OP_EFLAGS;
6401 break;
6402 case 0x9f: /* lahf */
6403 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6404 goto illegal_op;
6405 if (s->cc_op != CC_OP_DYNAMIC)
6406 gen_op_set_cc_op(s->cc_op);
6407 gen_compute_eflags(cpu_T[0]);
6408 /* Note: gen_compute_eflags() only gives the condition codes */
6409 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6410 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6411 break;
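/* cmc/clc/stc below first force the lazily-evaluated flags into
   cpu_cc_src (gen_compute_eflags), then edit only CC_C and switch
   to CC_OP_EFLAGS so later readers use the stored value directly. */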
6412 case 0xf5: /* cmc */
6413 if (s->cc_op != CC_OP_DYNAMIC)
6414 gen_op_set_cc_op(s->cc_op);
6415 gen_compute_eflags(cpu_cc_src);
6416 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6417 s->cc_op = CC_OP_EFLAGS;
6418 break;
6419 case 0xf8: /* clc */
6420 if (s->cc_op != CC_OP_DYNAMIC)
6421 gen_op_set_cc_op(s->cc_op);
6422 gen_compute_eflags(cpu_cc_src);
6423 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6424 s->cc_op = CC_OP_EFLAGS;
6425 break;
6426 case 0xf9: /* stc */
6427 if (s->cc_op != CC_OP_DYNAMIC)
6428 gen_op_set_cc_op(s->cc_op);
6429 gen_compute_eflags(cpu_cc_src);
6430 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6431 s->cc_op = CC_OP_EFLAGS;
6432 break;
6433 case 0xfc: /* cld */
6434 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6435 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6436 break;
6437 case 0xfd: /* std */
6438 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6439 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6440 break;
6442 /************************/
6443 /* bit operations */
6444 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6445 ot = dflag + OT_WORD;
6446 modrm = ldub_code(s->pc++);
6447 op = (modrm >> 3) & 7;
6448 mod = (modrm >> 6) & 3;
6449 rm = (modrm & 7) | REX_B(s);
6450 if (mod != 3) {
6451 s->rip_offset = 1;
6452 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6453 gen_op_ld_T0_A0(ot + s->mem_index);
6454 } else {
6455 gen_op_mov_TN_reg(ot, 0, rm);
6456 }
6457 /* load shift */
6458 val = ldub_code(s->pc++);
6459 gen_op_movl_T1_im(val);
6460 if (op < 4)
6461 goto illegal_op;
6462 op -= 4;
6463 goto bt_op;
6464 case 0x1a3: /* bt Gv, Ev */
6465 op = 0;
6466 goto do_btx;
6467 case 0x1ab: /* bts */
6468 op = 1;
6469 goto do_btx;
6470 case 0x1b3: /* btr */
6471 op = 2;
6472 goto do_btx;
6473 case 0x1bb: /* btc */
6474 op = 3;
6475 do_btx:
6476 ot = dflag + OT_WORD;
6477 modrm = ldub_code(s->pc++);
6478 reg = ((modrm >> 3) & 7) | rex_r;
6479 mod = (modrm >> 6) & 3;
6480 rm = (modrm & 7) | REX_B(s);
6481 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6482 if (mod != 3) {
6483 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6484 /* specific case: we need to add a displacement */
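/* the bit offset in T1 is signed: A0 += (bitoff >> (3 + ot)) << ot
   selects the 2^ot-byte word that contains the bit, so the memory
   forms of bt can address below as well as above the base address.
   E.g. with a dword operand and a bit offset of -1, A0 is moved
   back 4 bytes and bit 31 of that dword is tested. */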
6485 gen_exts(ot, cpu_T[1]);
6486 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6487 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6488 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6489 gen_op_ld_T0_A0(ot + s->mem_index);
6490 } else {
6491 gen_op_mov_TN_reg(ot, 0, rm);
6492 }
6493 bt_op:
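/* common tail: T1 is masked to the operand width, then op selects
   bt/bts/btr/btc (0..3). Each variant keeps the shifted-down
   original bit in cpu_tmp4 (cpu_cc_src for plain bt) so CF can be
   recovered from its low bit via CC_OP_SARB + ot after the
   optional writeback below. */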
6494 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6495 switch(op) {
6496 case 0:
6497 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6498 tcg_gen_movi_tl(cpu_cc_dst, 0);
6499 break;
6500 case 1:
6501 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6502 tcg_gen_movi_tl(cpu_tmp0, 1);
6503 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6504 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6505 break;
6506 case 2:
6507 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6508 tcg_gen_movi_tl(cpu_tmp0, 1);
6509 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6510 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6511 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6512 break;
6513 default:
6514 case 3:
6515 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6516 tcg_gen_movi_tl(cpu_tmp0, 1);
6517 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6518 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6519 break;
6520 }
6521 s->cc_op = CC_OP_SARB + ot;
6522 if (op != 0) {
6523 if (mod != 3)
6524 gen_op_st_T0_A0(ot + s->mem_index);
6525 else
6526 gen_op_mov_reg_T0(ot, rm);
6527 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6528 tcg_gen_movi_tl(cpu_cc_dst, 0);
6529 }
6530 break;
6531 case 0x1bc: /* bsf */
6532 case 0x1bd: /* bsr */
6533 {
6534 int label1;
6535 TCGv t0;
6537 ot = dflag + OT_WORD;
6538 modrm = ldub_code(s->pc++);
6539 reg = ((modrm >> 3) & 7) | rex_r;
6540 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6541 gen_extu(ot, cpu_T[0]);
6542 label1 = gen_new_label();
6543 tcg_gen_movi_tl(cpu_cc_dst, 0);
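/* cpu_cc_dst doubles as the ZF source under CC_OP_LOGIC: it stays
   0 (ZF set) when the input is zero and the branch below is taken,
   leaving the destination register unmodified, and becomes 1
   (ZF clear) when bsf/bsr produced a result. */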
6544 t0 = tcg_temp_local_new();
6545 tcg_gen_mov_tl(t0, cpu_T[0]);
6546 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
6547 if (b & 1) {
6548 gen_helper_bsr(cpu_T[0], t0);
6549 } else {
6550 gen_helper_bsf(cpu_T[0], t0);
6551 }
6552 gen_op_mov_reg_T0(ot, reg);
6553 tcg_gen_movi_tl(cpu_cc_dst, 1);
6554 gen_set_label(label1);
6555 tcg_gen_discard_tl(cpu_cc_src);
6556 s->cc_op = CC_OP_LOGICB + ot;
6557 tcg_temp_free(t0);
6558 }
6559 break;
6560 /************************/
6561 /* bcd */
6562 case 0x27: /* daa */
6563 if (CODE64(s))
6564 goto illegal_op;
6565 if (s->cc_op != CC_OP_DYNAMIC)
6566 gen_op_set_cc_op(s->cc_op);
6567 gen_helper_daa();
6568 s->cc_op = CC_OP_EFLAGS;
6569 break;
6570 case 0x2f: /* das */
6571 if (CODE64(s))
6572 goto illegal_op;
6573 if (s->cc_op != CC_OP_DYNAMIC)
6574 gen_op_set_cc_op(s->cc_op);
6575 gen_helper_das();
6576 s->cc_op = CC_OP_EFLAGS;
6577 break;
6578 case 0x37: /* aaa */
6579 if (CODE64(s))
6580 goto illegal_op;
6581 if (s->cc_op != CC_OP_DYNAMIC)
6582 gen_op_set_cc_op(s->cc_op);
6583 gen_helper_aaa();
6584 s->cc_op = CC_OP_EFLAGS;
6585 break;
6586 case 0x3f: /* aas */
6587 if (CODE64(s))
6588 goto illegal_op;
6589 if (s->cc_op != CC_OP_DYNAMIC)
6590 gen_op_set_cc_op(s->cc_op);
6591 gen_helper_aas();
6592 s->cc_op = CC_OP_EFLAGS;
6593 break;
6594 case 0xd4: /* aam */
6595 if (CODE64(s))
6596 goto illegal_op;
6597 val = ldub_code(s->pc++);
6598 if (val == 0) {
6599 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6600 } else {
6601 gen_helper_aam(tcg_const_i32(val));
6602 s->cc_op = CC_OP_LOGICB;
6603 }
6604 break;
6605 case 0xd5: /* aad */
6606 if (CODE64(s))
6607 goto illegal_op;
6608 val = ldub_code(s->pc++);
6609 gen_helper_aad(tcg_const_i32(val));
6610 s->cc_op = CC_OP_LOGICB;
6611 break;
6612 /************************/
6613 /* misc */
6614 case 0x90: /* nop */
6615 /* XXX: xchg + rex handling */
6616 /* XXX: correct lock test for all insn */
6617 if (prefixes & PREFIX_LOCK)
6618 goto illegal_op;
6619 if (prefixes & PREFIX_REPZ) {
6620 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6621 }
6622 break;
6623 case 0x9b: /* fwait */
6624 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6625 (HF_MP_MASK | HF_TS_MASK)) {
6626 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6627 } else {
6628 if (s->cc_op != CC_OP_DYNAMIC)
6629 gen_op_set_cc_op(s->cc_op);
6630 gen_jmp_im(pc_start - s->cs_base);
6631 gen_helper_fwait();
6632 }
6633 break;
6634 case 0xcc: /* int3 */
6635 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6636 break;
6637 case 0xcd: /* int N */
6638 val = ldub_code(s->pc++);
6639 if (s->vm86 && s->iopl != 3) {
6640 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6641 } else {
6642 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6643 }
6644 break;
6645 case 0xce: /* into */
6646 if (CODE64(s))
6647 goto illegal_op;
6648 if (s->cc_op != CC_OP_DYNAMIC)
6649 gen_op_set_cc_op(s->cc_op);
6650 gen_jmp_im(pc_start - s->cs_base);
6651 gen_helper_into(tcg_const_i32(s->pc - pc_start));
6652 break;
6653 #ifdef WANT_ICEBP
6654 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6655 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6656 #if 1
6657 gen_debug(s, pc_start - s->cs_base);
6658 #else
6659 /* start debug */
6660 tb_flush(cpu_single_env);
6661 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6662 #endif
6663 break;
6664 #endif
6665 case 0xfa: /* cli */
6666 if (!s->vm86) {
6667 if (s->cpl <= s->iopl) {
6668 gen_helper_cli();
6669 } else {
6670 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6671 }
6672 } else {
6673 if (s->iopl == 3) {
6674 gen_helper_cli();
6675 } else {
6676 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6677 }
6678 }
6679 break;
6680 case 0xfb: /* sti */
6681 if (!s->vm86) {
6682 if (s->cpl <= s->iopl) {
6683 gen_sti:
6684 gen_helper_sti();
6685 /* interrupts are recognized only after the first insn following sti */
6686 /* if several consecutive insns inhibit interrupts, only the
6687 _first_ sets the inhibit flag */
6688 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6689 gen_helper_set_inhibit_irq();
6690 /* give a chance to handle pending irqs */
6691 gen_jmp_im(s->pc - s->cs_base);
6692 gen_eob(s);
6693 } else {
6694 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6695 }
6696 } else {
6697 if (s->iopl == 3) {
6698 goto gen_sti;
6699 } else {
6700 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6701 }
6702 }
6703 break;
6704 case 0x62: /* bound */
6705 if (CODE64(s))
6706 goto illegal_op;
6707 ot = dflag ? OT_LONG : OT_WORD;
6708 modrm = ldub_code(s->pc++);
6709 reg = (modrm >> 3) & 7;
6710 mod = (modrm >> 6) & 3;
6711 if (mod == 3)
6712 goto illegal_op;
6713 gen_op_mov_TN_reg(ot, 0, reg);
6714 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6715 gen_jmp_im(pc_start - s->cs_base);
6716 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6717 if (ot == OT_WORD)
6718 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
6719 else
6720 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
6721 break;
6722 case 0x1c8 ... 0x1cf: /* bswap reg */
6723 reg = (b & 7) | REX_B(s);
6724 #ifdef TARGET_X86_64
6725 if (dflag == 2) {
6726 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6727 tcg_gen_bswap64_i64(cpu_T[0], cpu_T[0]);
6728 gen_op_mov_reg_T0(OT_QUAD, reg);
6729 } else
6730 #endif
6731 {
6732 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6733 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
6734 tcg_gen_bswap32_tl(cpu_T[0], cpu_T[0]);
6735 gen_op_mov_reg_T0(OT_LONG, reg);
6736 }
6737 break;
6738 case 0xd6: /* salc */
6739 if (CODE64(s))
6740 goto illegal_op;
6741 if (s->cc_op != CC_OP_DYNAMIC)
6742 gen_op_set_cc_op(s->cc_op);
6743 gen_compute_eflags_c(cpu_T[0]);
6744 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6745 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6746 break;
6747 case 0xe0: /* loopnz */
6748 case 0xe1: /* loopz */
6749 case 0xe2: /* loop */
6750 case 0xe3: /* jecxz */
6751 {
6752 int l1, l2, l3;
6754 tval = (int8_t)insn_get(s, OT_BYTE);
6755 next_eip = s->pc - s->cs_base;
6756 tval += next_eip;
6757 if (s->dflag == 0)
6758 tval &= 0xffff;
6760 l1 = gen_new_label();
6761 l2 = gen_new_label();
6762 l3 = gen_new_label();
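/* three-way control flow: l1 = branch taken (jump to tval),
   l3 = loopnz/loopz fall-through when ECX reaches zero,
   l2 = common exit after the not-taken path stores next_eip. */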
6763 b &= 3;
6764 switch(b) {
6765 case 0: /* loopnz */
6766 case 1: /* loopz */
6767 if (s->cc_op != CC_OP_DYNAMIC)
6768 gen_op_set_cc_op(s->cc_op);
6769 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6770 gen_op_jz_ecx(s->aflag, l3);
6771 gen_compute_eflags(cpu_tmp0);
6772 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6773 if (b == 0) {
6774 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6775 } else {
6776 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6777 }
6778 break;
6779 case 2: /* loop */
6780 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6781 gen_op_jnz_ecx(s->aflag, l1);
6782 break;
6783 default:
6784 case 3: /* jcxz */
6785 gen_op_jz_ecx(s->aflag, l1);
6786 break;
6787 }
6789 gen_set_label(l3);
6790 gen_jmp_im(next_eip);
6791 tcg_gen_br(l2);
6793 gen_set_label(l1);
6794 gen_jmp_im(tval);
6795 gen_set_label(l2);
6796 gen_eob(s);
6797 }
6798 break;
6799 case 0x130: /* wrmsr */
6800 case 0x132: /* rdmsr */
6801 if (s->cpl != 0) {
6802 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6803 } else {
6804 if (s->cc_op != CC_OP_DYNAMIC)
6805 gen_op_set_cc_op(s->cc_op);
6806 gen_jmp_im(pc_start - s->cs_base);
6807 if (b & 2) {
6808 gen_helper_rdmsr();
6809 } else {
6810 gen_helper_wrmsr();
6811 }
6812 }
6813 break;
6814 case 0x131: /* rdtsc */
6815 if (s->cc_op != CC_OP_DYNAMIC)
6816 gen_op_set_cc_op(s->cc_op);
6817 gen_jmp_im(pc_start - s->cs_base);
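/* with icount enabled, rdtsc counts as an I/O operation: it is
   bracketed with gen_io_start()/gen_io_end() and the TB ends right
   after, so the instruction counter is exact when the timestamp is
   read. */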
6818 if (use_icount)
6819 gen_io_start();
6820 gen_helper_rdtsc();
6821 if (use_icount) {
6822 gen_io_end();
6823 gen_jmp(s, s->pc - s->cs_base);
6824 }
6825 break;
6826 case 0x133: /* rdpmc */
6827 if (s->cc_op != CC_OP_DYNAMIC)
6828 gen_op_set_cc_op(s->cc_op);
6829 gen_jmp_im(pc_start - s->cs_base);
6830 gen_helper_rdpmc();
6831 break;
6832 case 0x134: /* sysenter */
6833 /* on Intel CPUs, SYSENTER remains valid in 64-bit mode */
6834 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6835 goto illegal_op;
6836 if (!s->pe) {
6837 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6838 } else {
6839 if (s->cc_op != CC_OP_DYNAMIC) {
6840 gen_op_set_cc_op(s->cc_op);
6841 s->cc_op = CC_OP_DYNAMIC;
6842 }
6843 gen_jmp_im(pc_start - s->cs_base);
6844 gen_helper_sysenter();
6845 gen_eob(s);
6846 }
6847 break;
6848 case 0x135: /* sysexit */
6849 /* on Intel CPUs, SYSEXIT remains valid in 64-bit mode */
6850 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6851 goto illegal_op;
6852 if (!s->pe) {
6853 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6854 } else {
6855 if (s->cc_op != CC_OP_DYNAMIC) {
6856 gen_op_set_cc_op(s->cc_op);
6857 s->cc_op = CC_OP_DYNAMIC;
6858 }
6859 gen_jmp_im(pc_start - s->cs_base);
6860 gen_helper_sysexit(tcg_const_i32(dflag));
6861 gen_eob(s);
6862 }
6863 break;
6864 #ifdef TARGET_X86_64
6865 case 0x105: /* syscall */
6866 /* XXX: is it usable in real mode ? */
6867 if (s->cc_op != CC_OP_DYNAMIC) {
6868 gen_op_set_cc_op(s->cc_op);
6869 s->cc_op = CC_OP_DYNAMIC;
6870 }
6871 gen_jmp_im(pc_start - s->cs_base);
6872 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
6873 gen_eob(s);
6874 break;
6875 case 0x107: /* sysret */
6876 if (!s->pe) {
6877 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6878 } else {
6879 if (s->cc_op != CC_OP_DYNAMIC) {
6880 gen_op_set_cc_op(s->cc_op);
6881 s->cc_op = CC_OP_DYNAMIC;
6882 }
6883 gen_jmp_im(pc_start - s->cs_base);
6884 gen_helper_sysret(tcg_const_i32(s->dflag));
6885 /* condition codes are modified only in long mode */
6886 if (s->lma)
6887 s->cc_op = CC_OP_EFLAGS;
6888 gen_eob(s);
6889 }
6890 break;
6891 #endif
6892 case 0x1a2: /* cpuid */
6893 if (s->cc_op != CC_OP_DYNAMIC)
6894 gen_op_set_cc_op(s->cc_op);
6895 gen_jmp_im(pc_start - s->cs_base);
6896 gen_helper_cpuid();
6897 break;
6898 case 0xf4: /* hlt */
6899 if (s->cpl != 0) {
6900 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6901 } else {
6902 if (s->cc_op != CC_OP_DYNAMIC)
6903 gen_op_set_cc_op(s->cc_op);
6904 gen_jmp_im(pc_start - s->cs_base);
6905 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
6906 s->is_jmp = 3;
6907 }
6908 break;
6909 case 0x100:
6910 modrm = ldub_code(s->pc++);
6911 mod = (modrm >> 6) & 3;
6912 op = (modrm >> 3) & 7;
6913 switch(op) {
6914 case 0: /* sldt */
6915 if (!s->pe || s->vm86)
6916 goto illegal_op;
6917 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6918 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6919 ot = OT_WORD;
6920 if (mod == 3)
6921 ot += s->dflag;
6922 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6923 break;
6924 case 2: /* lldt */
6925 if (!s->pe || s->vm86)
6926 goto illegal_op;
6927 if (s->cpl != 0) {
6928 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6929 } else {
6930 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6931 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6932 gen_jmp_im(pc_start - s->cs_base);
6933 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6934 gen_helper_lldt(cpu_tmp2_i32);
6935 }
6936 break;
6937 case 1: /* str */
6938 if (!s->pe || s->vm86)
6939 goto illegal_op;
6940 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
6941 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6942 ot = OT_WORD;
6943 if (mod == 3)
6944 ot += s->dflag;
6945 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6946 break;
6947 case 3: /* ltr */
6948 if (!s->pe || s->vm86)
6949 goto illegal_op;
6950 if (s->cpl != 0) {
6951 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6952 } else {
6953 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
6954 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6955 gen_jmp_im(pc_start - s->cs_base);
6956 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6957 gen_helper_ltr(cpu_tmp2_i32);
6958 }
6959 break;
6960 case 4: /* verr */
6961 case 5: /* verw */
6962 if (!s->pe || s->vm86)
6963 goto illegal_op;
6964 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6965 if (s->cc_op != CC_OP_DYNAMIC)
6966 gen_op_set_cc_op(s->cc_op);
6967 if (op == 4)
6968 gen_helper_verr(cpu_T[0]);
6969 else
6970 gen_helper_verw(cpu_T[0]);
6971 s->cc_op = CC_OP_EFLAGS;
6972 break;
6973 default:
6974 goto illegal_op;
6975 }
6976 break;
6977 case 0x101:
6978 modrm = ldub_code(s->pc++);
6979 mod = (modrm >> 6) & 3;
6980 op = (modrm >> 3) & 7;
6981 rm = modrm & 7;
6982 switch(op) {
6983 case 0: /* sgdt */
6984 if (mod == 3)
6985 goto illegal_op;
6986 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
6987 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6988 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
6989 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6990 gen_add_A0_im(s, 2);
6991 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
6992 if (!s->dflag)
6993 gen_op_andl_T0_im(0xffffff);
6994 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6995 break;
6996 case 1:
6997 if (mod == 3) {
6998 switch (rm) {
6999 case 0: /* monitor */
7000 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7001 s->cpl != 0)
7002 goto illegal_op;
7003 if (s->cc_op != CC_OP_DYNAMIC)
7004 gen_op_set_cc_op(s->cc_op);
7005 gen_jmp_im(pc_start - s->cs_base);
7006 #ifdef TARGET_X86_64
7007 if (s->aflag == 2) {
7008 gen_op_movq_A0_reg(R_EAX);
7009 } else
7010 #endif
7011 {
7012 gen_op_movl_A0_reg(R_EAX);
7013 if (s->aflag == 0)
7014 gen_op_andl_A0_ffff();
7015 }
7016 gen_add_A0_ds_seg(s);
7017 gen_helper_monitor(cpu_A0);
7018 break;
7019 case 1: /* mwait */
7020 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
7021 s->cpl != 0)
7022 goto illegal_op;
7023 if (s->cc_op != CC_OP_DYNAMIC) {
7024 gen_op_set_cc_op(s->cc_op);
7025 s->cc_op = CC_OP_DYNAMIC;
7026 }
7027 gen_jmp_im(pc_start - s->cs_base);
7028 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
7029 gen_eob(s);
7030 break;
7031 default:
7032 goto illegal_op;
7033 }
7034 } else { /* sidt */
7035 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
7036 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7037 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
7038 gen_op_st_T0_A0(OT_WORD + s->mem_index);
7039 gen_add_A0_im(s, 2);
7040 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
7041 if (!s->dflag)
7042 gen_op_andl_T0_im(0xffffff);
7043 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
7044 }
7045 break;
7046 case 2: /* lgdt */
7047 case 3: /* lidt */
7048 if (mod == 3) {
7049 if (s->cc_op != CC_OP_DYNAMIC)
7050 gen_op_set_cc_op(s->cc_op);
7051 gen_jmp_im(pc_start - s->cs_base);
7052 switch(rm) {
7053 case 0: /* VMRUN */
7054 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7055 goto illegal_op;
7056 if (s->cpl != 0) {
7057 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7058 break;
7059 } else {
7060 gen_helper_vmrun(tcg_const_i32(s->aflag),
7061 tcg_const_i32(s->pc - pc_start));
7062 tcg_gen_exit_tb(0);
7063 s->is_jmp = 3;
7064 }
7065 break;
7066 case 1: /* VMMCALL */
7067 if (!(s->flags & HF_SVME_MASK))
7068 goto illegal_op;
7069 gen_helper_vmmcall();
7070 break;
7071 case 2: /* VMLOAD */
7072 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7073 goto illegal_op;
7074 if (s->cpl != 0) {
7075 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7076 break;
7077 } else {
7078 gen_helper_vmload(tcg_const_i32(s->aflag));
7079 }
7080 break;
7081 case 3: /* VMSAVE */
7082 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7083 goto illegal_op;
7084 if (s->cpl != 0) {
7085 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7086 break;
7087 } else {
7088 gen_helper_vmsave(tcg_const_i32(s->aflag));
7089 }
7090 break;
7091 case 4: /* STGI */
7092 if ((!(s->flags & HF_SVME_MASK) &&
7093 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7094 !s->pe)
7095 goto illegal_op;
7096 if (s->cpl != 0) {
7097 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7098 break;
7099 } else {
7100 gen_helper_stgi();
7101 }
7102 break;
7103 case 5: /* CLGI */
7104 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7105 goto illegal_op;
7106 if (s->cpl != 0) {
7107 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7108 break;
7109 } else {
7110 gen_helper_clgi();
7111 }
7112 break;
7113 case 6: /* SKINIT */
7114 if ((!(s->flags & HF_SVME_MASK) &&
7115 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7116 !s->pe)
7117 goto illegal_op;
7118 gen_helper_skinit();
7119 break;
7120 case 7: /* INVLPGA */
7121 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7122 goto illegal_op;
7123 if (s->cpl != 0) {
7124 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7125 break;
7126 } else {
7127 gen_helper_invlpga(tcg_const_i32(s->aflag));
7128 }
7129 break;
7130 default:
7131 goto illegal_op;
7132 }
7133 } else if (s->cpl != 0) {
7134 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7135 } else {
7136 gen_svm_check_intercept(s, pc_start,
7137 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7138 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7139 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7140 gen_add_A0_im(s, 2);
7141 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
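/* descriptor-table load: 16-bit limit first, then the base; with
   a 16-bit operand size only the low 24 bits of the base are used,
   matching the historical 286/386 behaviour of lgdt/lidt. */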
7142 if (!s->dflag)
7143 gen_op_andl_T0_im(0xffffff);
7144 if (op == 2) {
7145 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7146 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7147 } else {
7148 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7149 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7150 }
7151 }
7152 break;
7153 case 4: /* smsw */
7154 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
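/* cr[0] is target_ulong wide; on a big-endian host the low 32
   bits of a 64-bit field live at offset +4, hence the
   WORDS_BIGENDIAN special case when loading only 32 bits of CR0. */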
7155 #if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
7156 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7157 #else
7158 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7159 #endif
7160 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7161 break;
7162 case 6: /* lmsw */
7163 if (s->cpl != 0) {
7164 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7165 } else {
7166 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7167 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7168 gen_helper_lmsw(cpu_T[0]);
7169 gen_jmp_im(s->pc - s->cs_base);
7170 gen_eob(s);
7171 }
7172 break;
7173 case 7: /* invlpg */
7174 if (s->cpl != 0) {
7175 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7176 } else {
7177 if (mod == 3) {
7178 #ifdef TARGET_X86_64
7179 if (CODE64(s) && rm == 0) {
7180 /* swapgs */
7181 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
7182 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
7183 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
7184 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
7185 } else
7186 #endif
7187 {
7188 goto illegal_op;
7189 }
7190 } else {
7191 if (s->cc_op != CC_OP_DYNAMIC)
7192 gen_op_set_cc_op(s->cc_op);
7193 gen_jmp_im(pc_start - s->cs_base);
7194 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7195 gen_helper_invlpg(cpu_A0);
7196 gen_jmp_im(s->pc - s->cs_base);
7197 gen_eob(s);
7198 }
7199 }
7200 break;
7201 default:
7202 goto illegal_op;
7203 }
7204 break;
7205 case 0x108: /* invd */
7206 case 0x109: /* wbinvd */
7207 if (s->cpl != 0) {
7208 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7209 } else {
7210 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7211 /* nothing to do */
7212 }
7213 break;
7214 case 0x63: /* arpl or movslS (x86_64) */
7215 #ifdef TARGET_X86_64
7216 if (CODE64(s)) {
7217 int d_ot;
7218 /* d_ot is the size of destination */
7219 d_ot = dflag + OT_WORD;
7221 modrm = ldub_code(s->pc++);
7222 reg = ((modrm >> 3) & 7) | rex_r;
7223 mod = (modrm >> 6) & 3;
7224 rm = (modrm & 7) | REX_B(s);
7226 if (mod == 3) {
7227 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7228 /* sign extend */
7229 if (d_ot == OT_QUAD)
7230 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7231 gen_op_mov_reg_T0(d_ot, reg);
7232 } else {
7233 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7234 if (d_ot == OT_QUAD) {
7235 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7236 } else {
7237 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7238 }
7239 gen_op_mov_reg_T0(d_ot, reg);
7240 }
7241 } else
7242 #endif
7243 {
7244 int label1;
7245 TCGv t0, t1, t2;
7247 if (!s->pe || s->vm86)
7248 goto illegal_op;
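/* arpl: if the destination selector's RPL (low 2 bits) is less
   than the source's, raise it to the source RPL and set ZF,
   otherwise clear ZF. t2 carries the new Z bit, which is merged
   into the computed flags below. */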
7249 t0 = tcg_temp_local_new();
7250 t1 = tcg_temp_local_new();
7251 t2 = tcg_temp_local_new();
7252 ot = OT_WORD;
7253 modrm = ldub_code(s->pc++);
7254 reg = (modrm >> 3) & 7;
7255 mod = (modrm >> 6) & 3;
7256 rm = modrm & 7;
7257 if (mod != 3) {
7258 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7259 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7260 } else {
7261 gen_op_mov_v_reg(ot, t0, rm);
7262 }
7263 gen_op_mov_v_reg(ot, t1, reg);
7264 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7265 tcg_gen_andi_tl(t1, t1, 3);
7266 tcg_gen_movi_tl(t2, 0);
7267 label1 = gen_new_label();
7268 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7269 tcg_gen_andi_tl(t0, t0, ~3);
7270 tcg_gen_or_tl(t0, t0, t1);
7271 tcg_gen_movi_tl(t2, CC_Z);
7272 gen_set_label(label1);
7273 if (mod != 3) {
7274 gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
7275 } else {
7276 gen_op_mov_reg_v(ot, rm, t0);
7277 }
7278 if (s->cc_op != CC_OP_DYNAMIC)
7279 gen_op_set_cc_op(s->cc_op);
7280 gen_compute_eflags(cpu_cc_src);
7281 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7282 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7283 s->cc_op = CC_OP_EFLAGS;
7284 tcg_temp_free(t0);
7285 tcg_temp_free(t1);
7286 tcg_temp_free(t2);
7287 }
7288 break;
7289 case 0x102: /* lar */
7290 case 0x103: /* lsl */
7291 {
7292 int label1;
7293 TCGv t0;
7294 if (!s->pe || s->vm86)
7295 goto illegal_op;
7296 ot = dflag ? OT_LONG : OT_WORD;
7297 modrm = ldub_code(s->pc++);
7298 reg = ((modrm >> 3) & 7) | rex_r;
7299 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7300 t0 = tcg_temp_local_new();
7301 if (s->cc_op != CC_OP_DYNAMIC)
7302 gen_op_set_cc_op(s->cc_op);
7303 if (b == 0x102)
7304 gen_helper_lar(t0, cpu_T[0]);
7305 else
7306 gen_helper_lsl(t0, cpu_T[0]);
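/* the helpers set CC_Z when the selector is valid; the
   destination register is written only on that path, so an
   invalid selector leaves it unchanged and ZF reports the
   failure. */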
7307 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7308 label1 = gen_new_label();
7309 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7310 gen_op_mov_reg_v(ot, reg, t0);
7311 gen_set_label(label1);
7312 s->cc_op = CC_OP_EFLAGS;
7313 tcg_temp_free(t0);
7314 }
7315 break;
7316 case 0x118:
7317 modrm = ldub_code(s->pc++);
7318 mod = (modrm >> 6) & 3;
7319 op = (modrm >> 3) & 7;
7320 switch(op) {
7321 case 0: /* prefetchnta */
7322 case 1: /* prefetcht0 */
7323 case 2: /* prefetcht1 */
7324 case 3: /* prefetcht2 */
7325 if (mod == 3)
7326 goto illegal_op;
7327 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7328 /* nothing more to do */
7329 break;
7330 default: /* nop (multi byte) */
7331 gen_nop_modrm(s, modrm);
7332 break;
7333 }
7334 break;
7335 case 0x119 ... 0x11f: /* nop (multi byte) */
7336 modrm = ldub_code(s->pc++);
7337 gen_nop_modrm(s, modrm);
7338 break;
7339 case 0x120: /* mov reg, crN */
7340 case 0x122: /* mov crN, reg */
7341 if (s->cpl != 0) {
7342 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7343 } else {
7344 modrm = ldub_code(s->pc++);
7345 if ((modrm & 0xc0) != 0xc0)
7346 goto illegal_op;
7347 rm = (modrm & 7) | REX_B(s);
7348 reg = ((modrm >> 3) & 7) | rex_r;
7349 if (CODE64(s))
7350 ot = OT_QUAD;
7351 else
7352 ot = OT_LONG;
7353 switch(reg) {
7354 case 0:
7355 case 2:
7356 case 3:
7357 case 4:
7358 case 8:
7359 if (s->cc_op != CC_OP_DYNAMIC)
7360 gen_op_set_cc_op(s->cc_op);
7361 gen_jmp_im(pc_start - s->cs_base);
7362 if (b & 2) {
7363 gen_op_mov_TN_reg(ot, 0, rm);
7364 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7365 gen_jmp_im(s->pc - s->cs_base);
7366 gen_eob(s);
7367 } else {
7368 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7369 gen_op_mov_reg_T0(ot, rm);
7370 }
7371 break;
7372 default:
7373 goto illegal_op;
7374 }
7375 }
7376 break;
7377 case 0x121: /* mov reg, drN */
7378 case 0x123: /* mov drN, reg */
7379 if (s->cpl != 0) {
7380 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7381 } else {
7382 modrm = ldub_code(s->pc++);
7383 if ((modrm & 0xc0) != 0xc0)
7384 goto illegal_op;
7385 rm = (modrm & 7) | REX_B(s);
7386 reg = ((modrm >> 3) & 7) | rex_r;
7387 if (CODE64(s))
7388 ot = OT_QUAD;
7389 else
7390 ot = OT_LONG;
7391 /* XXX: do it dynamically with CR4.DE bit */
7392 if (reg == 4 || reg == 5 || reg >= 8)
7393 goto illegal_op;
7394 if (b & 2) {
7395 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7396 gen_op_mov_TN_reg(ot, 0, rm);
7397 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7398 gen_jmp_im(s->pc - s->cs_base);
7399 gen_eob(s);
7400 } else {
7401 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7402 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7403 gen_op_mov_reg_T0(ot, rm);
7404 }
7405 }
7406 break;
7407 case 0x106: /* clts */
7408 if (s->cpl != 0) {
7409 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7410 } else {
7411 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7412 gen_helper_clts();
7413 /* abort block because static cpu state changed */
7414 gen_jmp_im(s->pc - s->cs_base);
7415 gen_eob(s);
7416 }
7417 break;
7418 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7419 case 0x1c3: /* MOVNTI reg, mem */
7420 if (!(s->cpuid_features & CPUID_SSE2))
7421 goto illegal_op;
7422 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7423 modrm = ldub_code(s->pc++);
7424 mod = (modrm >> 6) & 3;
7425 if (mod == 3)
7426 goto illegal_op;
7427 reg = ((modrm >> 3) & 7) | rex_r;
7428 /* generate a generic store */
7429 gen_ldst_modrm(s, modrm, ot, reg, 1);
7430 break;
7431 case 0x1ae:
7432 modrm = ldub_code(s->pc++);
7433 mod = (modrm >> 6) & 3;
7434 op = (modrm >> 3) & 7;
7435 switch(op) {
7436 case 0: /* fxsave */
7437 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7438 (s->flags & HF_EM_MASK))
7439 goto illegal_op;
7440 if (s->flags & HF_TS_MASK) {
7441 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7442 break;
7443 }
7444 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7445 if (s->cc_op != CC_OP_DYNAMIC)
7446 gen_op_set_cc_op(s->cc_op);
7447 gen_jmp_im(pc_start - s->cs_base);
7448 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7449 break;
7450 case 1: /* fxrstor */
7451 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7452 (s->flags & HF_EM_MASK))
7453 goto illegal_op;
7454 if (s->flags & HF_TS_MASK) {
7455 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7456 break;
7457 }
7458 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7459 if (s->cc_op != CC_OP_DYNAMIC)
7460 gen_op_set_cc_op(s->cc_op);
7461 gen_jmp_im(pc_start - s->cs_base);
7462 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7463 break;
7464 case 2: /* ldmxcsr */
7465 case 3: /* stmxcsr */
7466 if (s->flags & HF_TS_MASK) {
7467 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7468 break;
7469 }
7470 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7471 mod == 3)
7472 goto illegal_op;
7473 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7474 if (op == 2) {
7475 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7476 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7477 } else {
7478 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7479 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7480 }
7481 break;
7482 case 5: /* lfence */
7483 case 6: /* mfence */
7484 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7485 goto illegal_op;
7486 break;
7487 case 7: /* sfence / clflush */
7488 if ((modrm & 0xc7) == 0xc0) {
7489 /* sfence */
7490 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7491 if (!(s->cpuid_features & CPUID_SSE))
7492 goto illegal_op;
7493 } else {
7494 /* clflush */
7495 if (!(s->cpuid_features & CPUID_CLFLUSH))
7496 goto illegal_op;
7497 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7498 }
7499 break;
7500 default:
7501 goto illegal_op;
7502 }
7503 break;
7504 case 0x10d: /* 3DNow! prefetch(w) */
7505 modrm = ldub_code(s->pc++);
7506 mod = (modrm >> 6) & 3;
7507 if (mod == 3)
7508 goto illegal_op;
7509 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7510 /* ignore for now */
7511 break;
7512 case 0x1aa: /* rsm */
7513 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7514 if (!(s->flags & HF_SMM_MASK))
7515 goto illegal_op;
7516 if (s->cc_op != CC_OP_DYNAMIC) {
7517 gen_op_set_cc_op(s->cc_op);
7518 s->cc_op = CC_OP_DYNAMIC;
7519 }
7520 gen_jmp_im(s->pc - s->cs_base);
7521 gen_helper_rsm();
7522 gen_eob(s);
7523 break;
7524 case 0x1b8: /* SSE4.2 popcnt */
7525 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7526 PREFIX_REPZ)
7527 goto illegal_op;
7528 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7529 goto illegal_op;
7531 modrm = ldub_code(s->pc++);
7532 reg = ((modrm >> 3) & 7);
7534 if (s->prefix & PREFIX_DATA)
7535 ot = OT_WORD;
7536 else if (s->dflag != 2)
7537 ot = OT_LONG;
7538 else
7539 ot = OT_QUAD;
7541 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7542 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
7543 gen_op_mov_reg_T0(ot, reg);
7545 s->cc_op = CC_OP_EFLAGS;
7546 break;
7547 case 0x10e ... 0x10f:
7548 /* 3DNow! instructions, ignore prefixes */
7549 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA);
7550 case 0x110 ... 0x117:
7551 case 0x128 ... 0x12f:
7552 case 0x138 ... 0x13a:
7553 case 0x150 ... 0x177:
7554 case 0x17c ... 0x17f:
7555 case 0x1c2:
7556 case 0x1c4 ... 0x1c6:
7557 case 0x1d0 ... 0x1fe:
7558 gen_sse(s, b, pc_start, rex_r);
7559 break;
7560 default:
7561 goto illegal_op;
7562 }
7563 /* lock generation */
7564 if (s->prefix & PREFIX_LOCK)
7565 gen_helper_unlock();
7566 return s->pc;
7567 illegal_op:
7568 if (s->prefix & PREFIX_LOCK)
7569 gen_helper_unlock();
7570 /* XXX: ensure that no lock was generated */
7571 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7572 return s->pc;
7575 void optimize_flags_init(void)
7576 {
7577 #if TCG_TARGET_REG_BITS == 32
7578 assert(sizeof(CCTable) == (1 << 3));
7579 #else
7580 assert(sizeof(CCTable) == (1 << 4));
7581 #endif
7582 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7583 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7584 offsetof(CPUState, cc_op), "cc_op");
7585 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
7586 "cc_src");
7587 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
7588 "cc_dst");
7589 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
7590 "cc_tmp");
7592 /* register helpers */
7593 #define GEN_HELPER 2
7594 #include "helper.h"
7595 }
7597 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7598 basic block 'tb'. If search_pc is TRUE, also generate PC
7599 information for each intermediate instruction. */
7600 static inline void gen_intermediate_code_internal(CPUState *env,
7601 TranslationBlock *tb,
7602 int search_pc)
7603 {
7604 DisasContext dc1, *dc = &dc1;
7605 target_ulong pc_ptr;
7606 uint16_t *gen_opc_end;
7607 CPUBreakpoint *bp;
7608 int j, lj, cflags;
7609 uint64_t flags;
7610 target_ulong pc_start;
7611 target_ulong cs_base;
7612 int num_insns;
7613 int max_insns;
7615 /* generate intermediate code */
7616 pc_start = tb->pc;
7617 cs_base = tb->cs_base;
7618 flags = tb->flags;
7619 cflags = tb->cflags;
7621 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7622 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7623 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7624 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7625 dc->f_st = 0;
7626 dc->vm86 = (flags >> VM_SHIFT) & 1;
7627 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7628 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7629 dc->tf = (flags >> TF_SHIFT) & 1;
7630 dc->singlestep_enabled = env->singlestep_enabled;
7631 dc->cc_op = CC_OP_DYNAMIC;
7632 dc->cs_base = cs_base;
7633 dc->tb = tb;
7634 dc->popl_esp_hack = 0;
7635 /* select memory access functions */
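/* mem_index picks the softmmu access functions: 0 for the
   user-only case, 1 * 4 for kernel mode, 2 * 4 for user mode; the
   factor of 4 leaves room for the four operand sizes, since ot is
   added to mem_index at each access. */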
7636 dc->mem_index = 0;
7637 if (flags & HF_SOFTMMU_MASK) {
7638 if (dc->cpl == 3)
7639 dc->mem_index = 2 * 4;
7640 else
7641 dc->mem_index = 1 * 4;
7642 }
7643 dc->cpuid_features = env->cpuid_features;
7644 dc->cpuid_ext_features = env->cpuid_ext_features;
7645 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7646 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7647 #ifdef TARGET_X86_64
7648 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7649 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7650 #endif
7651 dc->flags = flags;
7652 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7653 (flags & HF_INHIBIT_IRQ_MASK)
7654 #ifndef CONFIG_SOFTMMU
7655 || (flags & HF_SOFTMMU_MASK)
7656 #endif
7657 );
7658 #if 0
7659 /* check addseg logic */
7660 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7661 printf("ERROR addseg\n");
7662 #endif
7664 cpu_T[0] = tcg_temp_new();
7665 cpu_T[1] = tcg_temp_new();
7666 cpu_A0 = tcg_temp_new();
7667 cpu_T3 = tcg_temp_new();
7669 cpu_tmp0 = tcg_temp_new();
7670 cpu_tmp1_i64 = tcg_temp_new_i64();
7671 cpu_tmp2_i32 = tcg_temp_new_i32();
7672 cpu_tmp3_i32 = tcg_temp_new_i32();
7673 cpu_tmp4 = tcg_temp_new();
7674 cpu_tmp5 = tcg_temp_new();
7675 cpu_tmp6 = tcg_temp_new();
7676 cpu_ptr0 = tcg_temp_new_ptr();
7677 cpu_ptr1 = tcg_temp_new_ptr();
7679 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7681 dc->is_jmp = DISAS_NEXT;
7682 pc_ptr = pc_start;
7683 lj = -1;
7684 num_insns = 0;
7685 max_insns = tb->cflags & CF_COUNT_MASK;
7686 if (max_insns == 0)
7687 max_insns = CF_COUNT_MASK;
7689 gen_icount_start();
7690 for(;;) {
7691 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
7692 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
7693 if (bp->pc == pc_ptr &&
7694 !((bp->flags & BP_CPU) && (tb->flags & HF_RF_MASK))) {
7695 gen_debug(dc, pc_ptr - dc->cs_base);
7696 break;
7697 }
7698 }
7699 }
7700 if (search_pc) {
7701 j = gen_opc_ptr - gen_opc_buf;
7702 if (lj < j) {
7703 lj++;
7704 while (lj < j)
7705 gen_opc_instr_start[lj++] = 0;
7706 }
7707 gen_opc_pc[lj] = pc_ptr;
7708 gen_opc_cc_op[lj] = dc->cc_op;
7709 gen_opc_instr_start[lj] = 1;
7710 gen_opc_icount[lj] = num_insns;
7711 }
7712 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7713 gen_io_start();
7715 pc_ptr = disas_insn(dc, pc_ptr);
7716 num_insns++;
7717 /* stop translation if indicated */
7718 if (dc->is_jmp)
7719 break;
7720 /* in single-step mode, we generate only one instruction and
7721 raise an exception */
7722 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7723 the flag and abort the translation to give the irqs a
7724 chance to happen */
7725 if (dc->tf || dc->singlestep_enabled ||
7726 (flags & HF_INHIBIT_IRQ_MASK)) {
7727 gen_jmp_im(pc_ptr - dc->cs_base);
7728 gen_eob(dc);
7729 break;
7730 }
7731 /* if the translation grows too long, stop generation too */
7732 if (gen_opc_ptr >= gen_opc_end ||
7733 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7734 num_insns >= max_insns) {
7735 gen_jmp_im(pc_ptr - dc->cs_base);
7736 gen_eob(dc);
7737 break;
7738 }
7739 if (singlestep) {
7740 gen_jmp_im(pc_ptr - dc->cs_base);
7741 gen_eob(dc);
7742 break;
7743 }
7744 }
7745 if (tb->cflags & CF_LAST_IO)
7746 gen_io_end();
7747 gen_icount_end(tb, num_insns);
7748 *gen_opc_ptr = INDEX_op_end;
7749 /* don't forget to fill in the last values */
7750 if (search_pc) {
7751 j = gen_opc_ptr - gen_opc_buf;
7752 lj++;
7753 while (lj <= j)
7754 gen_opc_instr_start[lj++] = 0;
7755 }
7757 #ifdef DEBUG_DISAS
7758 log_cpu_state_mask(CPU_LOG_TB_CPU, env, X86_DUMP_CCOP);
7759 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7760 int disas_flags;
7761 qemu_log("----------------\n");
7762 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7763 #ifdef TARGET_X86_64
7764 if (dc->code64)
7765 disas_flags = 2;
7766 else
7767 #endif
7768 disas_flags = !dc->code32;
7769 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
7770 qemu_log("\n");
7771 }
7772 #endif
7774 if (!search_pc) {
7775 tb->size = pc_ptr - pc_start;
7776 tb->icount = num_insns;
7777 }
7778 }
7780 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7781 {
7782 gen_intermediate_code_internal(env, tb, 0);
7783 }
7785 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7786 {
7787 gen_intermediate_code_internal(env, tb, 1);
7788 }
7790 void gen_pc_load(CPUState *env, TranslationBlock *tb,
7791 unsigned long searched_pc, int pc_pos, void *puc)
7792 {
7793 int cc_op;
7794 #ifdef DEBUG_DISAS
7795 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7796 int i;
7797 qemu_log("RESTORE:\n");
7798 for(i = 0;i <= pc_pos; i++) {
7799 if (gen_opc_instr_start[i]) {
7800 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7801 }
7802 }
7803 qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7804 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7805 (uint32_t)tb->cs_base);
7806 }
7807 #endif
7808 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7809 cc_op = gen_opc_cc_op[pc_pos];
7810 if (cc_op != CC_OP_DYNAMIC)
7811 env->cc_op = cc_op;
7812 }