/*
 *  i386 translation
 *
 *  Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston MA 02110-1301 USA
 */
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <inttypes.h>
#include <signal.h>
#include <assert.h>

#include "cpu.h"
#include "exec-all.h"
#include "disas.h"
#include "tcg-op.h"

#include "helper.h"
#define GEN_HELPER 1
#include "helper.h"
#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10

#ifdef TARGET_X86_64
#define X86_64_ONLY(x) x
#define X86_64_DEF(x...) x
#define CODE64(s) ((s)->code64)
#define REX_X(s) ((s)->rex_x)
#define REX_B(s) ((s)->rex_b)
/* XXX: gcc generates push/pop in some opcodes, so we cannot use them */
#if 1
#define BUGGY_64(x) NULL
#endif
#else
#define X86_64_ONLY(x) NULL
#define X86_64_DEF(x...)
#define CODE64(s) 0
#define REX_X(s) 0
#define REX_B(s) 0
#endif
//#define MACRO_TEST   1

/* global register indexes */
static TCGv_ptr cpu_env;
static TCGv cpu_A0, cpu_cc_src, cpu_cc_dst, cpu_cc_tmp;
static TCGv_i32 cpu_cc_op;
/* local temps */
static TCGv cpu_T[2], cpu_T3;
/* local register indexes (only used inside old micro ops) */
static TCGv cpu_tmp0, cpu_tmp4;
static TCGv_ptr cpu_ptr0, cpu_ptr1;
static TCGv_i32 cpu_tmp2_i32, cpu_tmp3_i32;
static TCGv_i64 cpu_tmp1_i64;
static TCGv cpu_tmp5, cpu_tmp6;
#include "gen-icount.h"

#ifdef TARGET_X86_64
static int x86_64_hregs;
#endif
typedef struct DisasContext {
    /* current insn context */
    int override; /* -1 if no override */
    int prefix;
    int aflag, dflag;
    target_ulong pc; /* pc = eip + cs_base */
    int is_jmp; /* 1 = means jump (stop translation), 2 means CPU
                   static state change (stop translation) */
    /* current block context */
    target_ulong cs_base; /* base of CS segment */
    int pe;     /* protected mode */
    int code32; /* 32 bit code segment */
#ifdef TARGET_X86_64
    int lma;    /* long mode active */
    int code64; /* 64 bit code segment */
    int rex_x, rex_b;
#endif
    int ss32;   /* 32 bit stack segment */
    int cc_op;  /* current CC operation */
    int addseg; /* non zero if either DS/ES/SS have a non zero base */
    int f_st;   /* currently unused */
    int vm86;   /* vm86 mode */
    int cpl;
    int iopl;
    int tf;     /* TF cpu flag */
    int singlestep_enabled; /* "hardware" single step enabled */
    int jmp_opt; /* use direct block chaining for direct jumps */
    int mem_index; /* select memory access functions */
    uint64_t flags; /* all execution flags */
    struct TranslationBlock *tb;
    int popl_esp_hack; /* for correct popl with esp base handling */
    int rip_offset; /* only used in x86_64, but left for simplicity */
    int cpuid_features;
    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
} DisasContext;
static void gen_eob(DisasContext *s);
static void gen_jmp(DisasContext *s, target_ulong eip);
static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num);
/* i386 arith/logic operations */
enum {
    OP_ADDL,
    OP_ORL,
    OP_ADCL,
    OP_SBBL,
    OP_ANDL,
    OP_SUBL,
    OP_XORL,
    OP_CMPL,
};

/* i386 shift ops */
enum {
    OP_ROL,
    OP_ROR,
    OP_RCL,
    OP_RCR,
    OP_SHL,
    OP_SHR,
    OP_SHL1, /* undocumented */
    OP_SAR = 7,
};

enum {
    JCC_O,
    JCC_B,
    JCC_Z,
    JCC_BE,
    JCC_S,
    JCC_P,
    JCC_L,
    JCC_LE,
};

/* operand size */
enum {
    OT_BYTE = 0,
    OT_WORD,
    OT_LONG,
    OT_QUAD,
};

enum {
    /* I386 int registers */
    OR_EAX,   /* MUST be even numbered */
    OR_ECX,
    OR_EDX,
    OR_EBX,
    OR_ESP,
    OR_EBP,
    OR_ESI,
    OR_EDI,

    OR_TMP0 = 16,    /* temporary operand register */
    OR_TMP1,
    OR_A0, /* temporary register used when doing address evaluation */
};
static inline void gen_op_movl_T0_0(void)
{
    tcg_gen_movi_tl(cpu_T[0], 0);
}

static inline void gen_op_movl_T0_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T0_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_op_movl_T1_im(int32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_T1_imu(uint32_t val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_movl_A0_im(uint32_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_im(int64_t val)
{
    tcg_gen_movi_tl(cpu_A0, val);
}
#endif

static inline void gen_movtl_T0_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[0], val);
}

static inline void gen_movtl_T1_im(target_ulong val)
{
    tcg_gen_movi_tl(cpu_T[1], val);
}

static inline void gen_op_andl_T0_ffff(void)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
}

static inline void gen_op_andl_T0_im(uint32_t val)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], val);
}

static inline void gen_op_movl_T0_T1(void)
{
    tcg_gen_mov_tl(cpu_T[0], cpu_T[1]);
}

static inline void gen_op_andl_A0_ffff(void)
{
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffff);
}
#ifdef TARGET_X86_64

#define NB_OP_SIZES 4

#else /* !TARGET_X86_64 */

#define NB_OP_SIZES 3

#endif /* !TARGET_X86_64 */

#if defined(WORDS_BIGENDIAN)
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
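/* The REG_*_OFFSET macros above give the byte offset of each partial
   register lane (low byte, high byte, word, low/high 32-bit halves)
   within the target_ulong register slots of CPUState, for both host
   endiannesses, so the accessors below can implement AL/AH/AX/EAX style
   accesses with narrow TCG loads and stores. */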
static inline void gen_op_mov_reg_v(int ot, int reg, TCGv t0)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_B_OFFSET);
        } else {
            tcg_gen_st8_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    case OT_WORD:
        tcg_gen_st16_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case OT_QUAD:
        tcg_gen_st_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case OT_LONG:
        tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_reg_T0(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[0]);
}

static inline void gen_op_mov_reg_T1(int ot, int reg)
{
    gen_op_mov_reg_v(ot, reg, cpu_T[1]);
}
static inline void gen_op_mov_reg_A0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_st16_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
#ifdef TARGET_X86_64
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        /* high part of register set to zero */
        tcg_gen_movi_tl(cpu_tmp0, 0);
        tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
        break;
    default:
    case 2:
        tcg_gen_st_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#else
    default:
    case 1:
        tcg_gen_st32_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
        break;
#endif
    }
}
static inline void gen_op_mov_v_reg(int ot, TCGv t0, int reg)
{
    switch(ot) {
    case OT_BYTE:
        if (reg < 4 X86_64_DEF( || reg >= 8 || x86_64_hregs)) {
            goto std_case;
        } else {
            tcg_gen_ld8u_tl(t0, cpu_env, offsetof(CPUState, regs[reg - 4]) + REG_H_OFFSET);
        }
        break;
    default:
    std_case:
        tcg_gen_ld_tl(t0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
    }
}

static inline void gen_op_mov_TN_reg(int ot, int t_index, int reg)
{
    gen_op_mov_v_reg(ot, cpu_T[t_index], reg);
}
static inline void gen_op_movl_A0_reg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_im(int32_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_addq_A0_im(int64_t val)
{
    tcg_gen_addi_tl(cpu_A0, cpu_A0, val);
}
#endif

static void gen_add_A0_im(DisasContext *s, int val)
{
#ifdef TARGET_X86_64
    if (CODE64(s))
        gen_op_addq_A0_im(val);
    else
#endif
        gen_op_addl_A0_im(val);
}
static inline void gen_op_addl_T0_T1(void)
{
    tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
}

static inline void gen_op_jmp_T0(void)
{
    tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUState, eip));
}
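/* In the register update helpers below, 'size' uses the encoding
   0 = 16 bit, 1 = 32 bit, 2 = 64 bit; the 32-bit case masks the result
   with 0xffffffff before the full-width store, matching the x86-64 rule
   that a 32-bit write zeroes the upper half of the register. */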
static inline void gen_op_add_reg_im(int size, int reg, int32_t val)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_addi_tl(cpu_tmp0, cpu_tmp0, val);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}
static inline void gen_op_add_reg_T0(int size, int reg)
{
    switch(size) {
    case 0:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st16_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_W_OFFSET);
        break;
    case 1:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
#ifdef TARGET_X86_64
        tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffff);
#endif
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#ifdef TARGET_X86_64
    case 2:
        tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        tcg_gen_add_tl(cpu_tmp0, cpu_tmp0, cpu_T[0]);
        tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
        break;
#endif
    }
}
static inline void gen_op_set_cc_op(int32_t val)
{
    tcg_gen_movi_i32(cpu_cc_op, val);
}

static inline void gen_op_addl_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}
static inline void gen_op_movl_A0_seg(int reg)
{
    tcg_gen_ld32u_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base) + REG_L_OFFSET);
}

static inline void gen_op_addl_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
#ifdef TARGET_X86_64
    tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
#endif
}

#ifdef TARGET_X86_64
static inline void gen_op_movq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, segs[reg].base));
}

static inline void gen_op_addq_A0_seg(int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, segs[reg].base));
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}

static inline void gen_op_movq_A0_reg(int reg)
{
    tcg_gen_ld_tl(cpu_A0, cpu_env, offsetof(CPUState, regs[reg]));
}

static inline void gen_op_addq_A0_reg_sN(int shift, int reg)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]));
    if (shift != 0)
        tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, shift);
    tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
}
#endif
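/* In the guest memory access helpers below, 'idx' packs two fields: its
   low two bits select the operand size (OT_BYTE..OT_QUAD) and the bits
   above them carry the softmmu memory index biased by one, which is why
   callers pass "ot + s->mem_index" and the helpers recover it with
   (idx >> 2) - 1. */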
static inline void gen_op_lds_T0_A0(int idx)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8s(cpu_T[0], cpu_A0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16s(cpu_T[0], cpu_A0, mem_index);
        break;
    default:
    case 2:
        tcg_gen_qemu_ld32s(cpu_T[0], cpu_A0, mem_index);
        break;
    }
}
static inline void gen_op_ld_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_ld8u(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_ld16u(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_ld32u(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets. */
#ifdef TARGET_X86_64
        tcg_gen_qemu_ld64(t0, a0, mem_index);
#endif
        break;
    }
}
/* XXX: always use ldu or lds */
static inline void gen_op_ld_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ldu_T0_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_ld_T1_A0(int idx)
{
    gen_op_ld_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_op_st_v(int idx, TCGv t0, TCGv a0)
{
    int mem_index = (idx >> 2) - 1;
    switch(idx & 3) {
    case 0:
        tcg_gen_qemu_st8(t0, a0, mem_index);
        break;
    case 1:
        tcg_gen_qemu_st16(t0, a0, mem_index);
        break;
    case 2:
        tcg_gen_qemu_st32(t0, a0, mem_index);
        break;
    default:
    case 3:
        /* Should never happen on 32-bit targets. */
#ifdef TARGET_X86_64
        tcg_gen_qemu_st64(t0, a0, mem_index);
#endif
        break;
    }
}

static inline void gen_op_st_T0_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[0], cpu_A0);
}

static inline void gen_op_st_T1_A0(int idx)
{
    gen_op_st_v(idx, cpu_T[1], cpu_A0);
}
static inline void gen_jmp_im(target_ulong pc)
{
    tcg_gen_movi_tl(cpu_tmp0, pc);
    tcg_gen_st_tl(cpu_tmp0, cpu_env, offsetof(CPUState, eip));
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    int override;

    override = s->override;
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        if (override >= 0) {
            gen_op_movq_A0_seg(override);
            gen_op_addq_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movq_A0_reg(R_ESI);
        }
    } else
#endif
    if (s->aflag) {
        /* 32 bit address */
        if (s->addseg && override < 0)
            override = R_DS;
        if (override >= 0) {
            gen_op_movl_A0_seg(override);
            gen_op_addl_A0_reg_sN(0, R_ESI);
        } else {
            gen_op_movl_A0_reg(R_ESI);
        }
    } else {
        /* 16 bit address, always override */
        if (override < 0)
            override = R_DS;
        gen_op_movl_A0_reg(R_ESI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(override);
    }
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
#ifdef TARGET_X86_64
    if (s->aflag == 2) {
        gen_op_movq_A0_reg(R_EDI);
    } else
#endif
    if (s->aflag) {
        if (s->addseg) {
            gen_op_movl_A0_seg(R_ES);
            gen_op_addl_A0_reg_sN(0, R_EDI);
        } else {
            gen_op_movl_A0_reg(R_EDI);
        }
    } else {
        gen_op_movl_A0_reg(R_EDI);
        gen_op_andl_A0_ffff();
        gen_op_addl_A0_seg(R_ES);
    }
}
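/* Load the string-operation stride into T0: env->df holds +1 or -1
   depending on EFLAGS.DF, and shifting it left by the operand size log2
   yields the signed per-element step used to advance ESI/EDI. */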
static inline void gen_op_movl_T0_Dshift(int ot)
{
    tcg_gen_ld32s_tl(cpu_T[0], cpu_env, offsetof(CPUState, df));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], ot);
}
static void gen_extu(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8u_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16u_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32u_tl(reg, reg);
        break;
    default:
        break;
    }
}

static void gen_exts(int ot, TCGv reg)
{
    switch(ot) {
    case OT_BYTE:
        tcg_gen_ext8s_tl(reg, reg);
        break;
    case OT_WORD:
        tcg_gen_ext16s_tl(reg, reg);
        break;
    case OT_LONG:
        tcg_gen_ext32s_tl(reg, reg);
        break;
    default:
        break;
    }
}
static inline void gen_op_jnz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, label1);
}

static inline void gen_op_jz_ecx(int size, int label1)
{
    tcg_gen_ld_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[R_ECX]));
    gen_extu(size + 1, cpu_tmp0);
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
}
static void gen_helper_in_func(int ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_inb(v, n); break;
    case 1: gen_helper_inw(v, n); break;
    case 2: gen_helper_inl(v, n); break;
    }
}

static void gen_helper_out_func(int ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case 0: gen_helper_outb(v, n); break;
    case 1: gen_helper_outw(v, n); break;
    case 2: gen_helper_outl(v, n); break;
    }
}
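/* Generate the protection checks needed before I/O instructions: in
   protected mode with CPL > IOPL, or in vm86 mode, the check_io helpers
   consult the TSS I/O permission bitmap, and with SVM intercepts active
   an I/O #VMEXIT may be raised.  cc_op and EIP are synced to the CPU
   state first so any resulting exception is precise. */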
static void gen_check_io(DisasContext *s, int ot, target_ulong cur_eip,
                         uint32_t svm_flags)
{
    int state_saved;
    target_ulong next_eip;

    state_saved = 0;
    if (s->pe && (s->cpl > s->iopl || s->vm86)) {
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        state_saved = 1;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        switch (ot) {
        case 0: gen_helper_check_iob(cpu_tmp2_i32); break;
        case 1: gen_helper_check_iow(cpu_tmp2_i32); break;
        case 2: gen_helper_check_iol(cpu_tmp2_i32); break;
        }
    }
    if (s->flags & HF_SVMI_MASK) {
        if (!state_saved) {
            if (s->cc_op != CC_OP_DYNAMIC)
                gen_op_set_cc_op(s->cc_op);
            gen_jmp_im(cur_eip);
            state_saved = 1;
        }
        svm_flags |= (1 << (4 + ot));
        next_eip = s->pc - s->cs_base;
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_svm_check_io(cpu_tmp2_i32, tcg_const_i32(svm_flags),
                                tcg_const_i32(next_eip - cur_eip));
    }
}
static inline void gen_movs(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
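/* Lazy condition codes: rather than computing EFLAGS after every
   instruction, the translator records in cc_op which operation last set
   the flags, keeping its operands/result in cc_src/cc_dst, and only
   materializes actual flag values on demand (see gen_compute_eflags
   below).  CC_OP_DYNAMIC means the operation is itself only known at run
   time, through the cpu_cc_op global. */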
static inline void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
}

static void gen_op_update1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static void gen_op_update2_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}

static inline void gen_op_cmpl_T0_T1_cc(void)
{
    tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
    tcg_gen_sub_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static inline void gen_op_testl_T0_T1_cc(void)
{
    tcg_gen_discard_tl(cpu_cc_src);
    tcg_gen_and_tl(cpu_cc_dst, cpu_T[0], cpu_T[1]);
}

static void gen_op_update_neg_cc(void)
{
    tcg_gen_neg_tl(cpu_cc_src, cpu_T[0]);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
/* compute eflags.C to reg */
static void gen_compute_eflags_c(TCGv reg)
{
    gen_helper_cc_compute_c(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}

/* compute all eflags to cc_src */
static void gen_compute_eflags(TCGv reg)
{
    gen_helper_cc_compute_all(cpu_tmp2_i32, cpu_cc_op);
    tcg_gen_extu_i32_tl(reg, cpu_tmp2_i32);
}
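/* The shift counts used below are the EFLAGS bit positions: CF is bit 0,
   PF bit 2, ZF bit 6, SF bit 7 and OF bit 11.  Thus JCC_S extracts
   bit 7, and the signed conditions (JCC_L, JCC_LE) combine bits 11 and 7
   to test SF != OF. */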
static inline void gen_setcc_slow_T0(DisasContext *s, int jcc_op)
{
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);
    switch(jcc_op) {
    case JCC_O:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 11);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_B:
        gen_compute_eflags_c(cpu_T[0]);
        break;
    case JCC_Z:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 6);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_BE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 6);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_S:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 7);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_P:
        gen_compute_eflags(cpu_T[0]);
        tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 2);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    case JCC_L:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 7); /* CC_S */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    default:
    case JCC_LE:
        gen_compute_eflags(cpu_tmp0);
        tcg_gen_shri_tl(cpu_T[0], cpu_tmp0, 11); /* CC_O */
        tcg_gen_shri_tl(cpu_tmp4, cpu_tmp0, 7); /* CC_S */
        tcg_gen_shri_tl(cpu_tmp0, cpu_tmp0, 6); /* CC_Z */
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
        tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 1);
        break;
    }
}
/* return true if setcc_slow is not needed (WARNING: must be kept in
   sync with gen_jcc1) */
static int is_fast_jcc_case(DisasContext *s, int b)
{
    int jcc_op;
    jcc_op = (b >> 1) & 7;
    switch(s->cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:
        if (jcc_op == JCC_O || jcc_op == JCC_P)
            goto slow_jcc;
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:
        if (jcc_op != JCC_Z && jcc_op != JCC_S)
            goto slow_jcc;
        break;
    default:
    slow_jcc:
        return 0;
    }
    return 1;
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1(DisasContext *s, int cc_op, int b, int l1)
{
    int inv, jcc_op, size, cond;
    TCGv t0;

    inv = b & 1;
    jcc_op = (b >> 1) & 7;

    switch(cc_op) {
        /* we optimize the cmp/jcc case */
    case CC_OP_SUBB:
    case CC_OP_SUBW:
    case CC_OP_SUBL:
    case CC_OP_SUBQ:

        size = cc_op - CC_OP_SUBB;
        switch(jcc_op) {
        case JCC_Z:
        fast_jcc_z:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xff);
                t0 = cpu_tmp0;
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffff);
                t0 = cpu_tmp0;
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0xffffffff);
                t0 = cpu_tmp0;
                break;
#endif
            default:
                t0 = cpu_cc_dst;
                break;
            }
            tcg_gen_brcondi_tl(inv ? TCG_COND_NE : TCG_COND_EQ, t0, 0, l1);
            break;
        case JCC_S:
        fast_jcc_s:
            switch(size) {
            case 0:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
            case 1:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x8000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#ifdef TARGET_X86_64
            case 2:
                tcg_gen_andi_tl(cpu_tmp0, cpu_cc_dst, 0x80000000);
                tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE, cpu_tmp0,
                                   0, l1);
                break;
#endif
            default:
                tcg_gen_brcondi_tl(inv ? TCG_COND_GE : TCG_COND_LT, cpu_cc_dst,
                                   0, l1);
                break;
            }
            break;

        case JCC_B:
            cond = inv ? TCG_COND_GEU : TCG_COND_LTU;
            goto fast_jcc_b;
        case JCC_BE:
            cond = inv ? TCG_COND_GTU : TCG_COND_LEU;
        fast_jcc_b:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xff);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffff);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_andi_tl(cpu_tmp4, cpu_tmp4, 0xffffffff);
                tcg_gen_andi_tl(t0, cpu_cc_src, 0xffffffff);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        case JCC_L:
            cond = inv ? TCG_COND_GE : TCG_COND_LT;
            goto fast_jcc_l;
        case JCC_LE:
            cond = inv ? TCG_COND_GT : TCG_COND_LE;
        fast_jcc_l:
            tcg_gen_add_tl(cpu_tmp4, cpu_cc_dst, cpu_cc_src);
            switch(size) {
            case 0:
                t0 = cpu_tmp0;
                tcg_gen_ext8s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext8s_tl(t0, cpu_cc_src);
                break;
            case 1:
                t0 = cpu_tmp0;
                tcg_gen_ext16s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext16s_tl(t0, cpu_cc_src);
                break;
#ifdef TARGET_X86_64
            case 2:
                t0 = cpu_tmp0;
                tcg_gen_ext32s_tl(cpu_tmp4, cpu_tmp4);
                tcg_gen_ext32s_tl(t0, cpu_cc_src);
                break;
#endif
            default:
                t0 = cpu_cc_src;
                break;
            }
            tcg_gen_brcond_tl(cond, cpu_tmp4, t0, l1);
            break;

        default:
            goto slow_jcc;
        }
        break;

        /* some jumps are easy to compute */
    case CC_OP_ADDB:
    case CC_OP_ADDW:
    case CC_OP_ADDL:
    case CC_OP_ADDQ:

    case CC_OP_ADCB:
    case CC_OP_ADCW:
    case CC_OP_ADCL:
    case CC_OP_ADCQ:

    case CC_OP_SBBB:
    case CC_OP_SBBW:
    case CC_OP_SBBL:
    case CC_OP_SBBQ:

    case CC_OP_LOGICB:
    case CC_OP_LOGICW:
    case CC_OP_LOGICL:
    case CC_OP_LOGICQ:

    case CC_OP_INCB:
    case CC_OP_INCW:
    case CC_OP_INCL:
    case CC_OP_INCQ:

    case CC_OP_DECB:
    case CC_OP_DECW:
    case CC_OP_DECL:
    case CC_OP_DECQ:

    case CC_OP_SHLB:
    case CC_OP_SHLW:
    case CC_OP_SHLL:
    case CC_OP_SHLQ:

    case CC_OP_SARB:
    case CC_OP_SARW:
    case CC_OP_SARL:
    case CC_OP_SARQ:
        switch(jcc_op) {
        case JCC_Z:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_z;
        case JCC_S:
            size = (cc_op - CC_OP_ADDB) & 3;
            goto fast_jcc_s;
        default:
            goto slow_jcc;
        }
        break;
    default:
    slow_jcc:
        gen_setcc_slow_T0(s, jcc_op);
        tcg_gen_brcondi_tl(inv ? TCG_COND_EQ : TCG_COND_NE,
                           cpu_T[0], 0, l1);
        break;
    }
}
/* XXX: does not work with gdbstub "ice" single step - not a
   serious problem */
static int gen_jz_ecx_string(DisasContext *s, target_ulong next_eip)
{
    int l1, l2;

    l1 = gen_new_label();
    l2 = gen_new_label();
    gen_op_jnz_ecx(s->aflag, l1);
    gen_set_label(l2);
    gen_jmp_tb(s, next_eip, 1);
    gen_set_label(l1);
    return l2;
}
static inline void gen_stos(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_lods(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_op_mov_reg_T0(ot, R_EAX);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
}

static inline void gen_scas(DisasContext *s, int ot)
{
    gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}

static inline void gen_cmps(DisasContext *s, int ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);
    gen_string_movl_A0_EDI(s);
    gen_op_ld_T1_A0(ot + s->mem_index);
    gen_op_cmpl_T0_T1_cc();
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    gen_op_add_reg_T0(s->aflag, R_EDI);
}
static inline void gen_ins(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    gen_op_movl_T0_0();
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    gen_helper_in_func(ot, cpu_T[0], cpu_tmp2_i32);
    gen_op_st_T0_A0(ot + s->mem_index);
    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_EDI);
    if (use_icount)
        gen_io_end();
}

static inline void gen_outs(DisasContext *s, int ot)
{
    if (use_icount)
        gen_io_start();
    gen_string_movl_A0_ESI(s);
    gen_op_ld_T0_A0(ot + s->mem_index);

    gen_op_mov_TN_reg(OT_WORD, 1, R_EDX);
    tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[1]);
    tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[0]);
    gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);

    gen_op_movl_T0_Dshift(ot);
    gen_op_add_reg_T0(s->aflag, R_ESI);
    if (use_icount)
        gen_io_end();
}
/* same method as Valgrind : we generate jumps to current or next
   instruction */
#define GEN_REPZ(op)                                                          \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                 target_ulong cur_eip, target_ulong next_eip) \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    /* a loop would cause two single step exceptions if ECX = 1               \
       before rep string_insn */                                              \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}

#define GEN_REPZ2(op)                                                         \
static inline void gen_repz_ ## op(DisasContext *s, int ot,                   \
                                   target_ulong cur_eip,                      \
                                   target_ulong next_eip,                     \
                                   int nz)                                    \
{                                                                             \
    int l2;                                                                   \
    gen_update_cc_op(s);                                                      \
    l2 = gen_jz_ecx_string(s, next_eip);                                      \
    gen_ ## op(s, ot);                                                        \
    gen_op_add_reg_im(s->aflag, R_ECX, -1);                                   \
    gen_op_set_cc_op(CC_OP_SUBB + ot);                                        \
    gen_jcc1(s, CC_OP_SUBB + ot, (JCC_Z << 1) | (nz ^ 1), l2);                \
    if (!s->jmp_opt)                                                          \
        gen_op_jz_ecx(s->aflag, l2);                                          \
    gen_jmp(s, cur_eip);                                                      \
}
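/* GEN_REPZ covers string ops whose REP prefix only tests ECX (MOVS, STOS,
   LODS, INS, OUTS); GEN_REPZ2 additionally re-evaluates ZF after every
   iteration, as REPZ/REPNZ require for SCAS and CMPS, with the 'nz'
   argument selecting which ZF polarity terminates the loop. */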
GEN_REPZ(movs)
GEN_REPZ(stos)
GEN_REPZ(lods)
GEN_REPZ(ins)
GEN_REPZ(outs)
GEN_REPZ2(scas)
GEN_REPZ2(cmps)
static void gen_helper_fp_arith_ST0_FT0(int op)
{
    switch (op) {
    case 0: gen_helper_fadd_ST0_FT0(); break;
    case 1: gen_helper_fmul_ST0_FT0(); break;
    case 2: gen_helper_fcom_ST0_FT0(); break;
    case 3: gen_helper_fcom_ST0_FT0(); break;
    case 4: gen_helper_fsub_ST0_FT0(); break;
    case 5: gen_helper_fsubr_ST0_FT0(); break;
    case 6: gen_helper_fdiv_ST0_FT0(); break;
    case 7: gen_helper_fdivr_ST0_FT0(); break;
    }
}

/* NOTE the exception in "r" op ordering */
static void gen_helper_fp_arith_STN_ST0(int op, int opreg)
{
    TCGv_i32 tmp = tcg_const_i32(opreg);
    switch (op) {
    case 0: gen_helper_fadd_STN_ST0(tmp); break;
    case 1: gen_helper_fmul_STN_ST0(tmp); break;
    case 4: gen_helper_fsubr_STN_ST0(tmp); break;
    case 5: gen_helper_fsub_STN_ST0(tmp); break;
    case 6: gen_helper_fdivr_STN_ST0(tmp); break;
    case 7: gen_helper_fdiv_STN_ST0(tmp); break;
    }
}
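/* For ADC and SBB the flags layout depends on the run-time carry, so the
   OP_ADCL/OP_SBBL cases below compute cc_op dynamically as
   CC_OP_ADDB (or CC_OP_SUBB) + ot + (carry << 2); this relies on the ADC
   and SBB cc_op enum values sitting exactly four entries after their
   ADD/SUB counterparts. */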
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_op(DisasContext *s1, int op, int ot, int d)
{
    if (d != OR_TMP0) {
        gen_op_mov_TN_reg(ot, 0, d);
    } else {
        gen_op_ld_T0_A0(ot + s1->mem_index);
    }
    switch(op) {
    case OP_ADCL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_add_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_ADDB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_SBBL:
        if (s1->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s1->cc_op);
        gen_compute_eflags_c(cpu_tmp4);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_tmp4);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        tcg_gen_mov_tl(cpu_cc_src, cpu_T[1]);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp4);
        tcg_gen_shli_i32(cpu_tmp2_i32, cpu_tmp2_i32, 2);
        tcg_gen_addi_i32(cpu_cc_op, cpu_tmp2_i32, CC_OP_SUBB + ot);
        s1->cc_op = CC_OP_DYNAMIC;
        break;
    case OP_ADDL:
        gen_op_addl_T0_T1();
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_ADDB + ot;
        break;
    case OP_SUBL:
        tcg_gen_sub_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update2_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    default:
    case OP_ANDL:
        tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_ORL:
        tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_XORL:
        tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        if (d != OR_TMP0)
            gen_op_mov_reg_T0(ot, d);
        else
            gen_op_st_T0_A0(ot + s1->mem_index);
        gen_op_update1_cc();
        s1->cc_op = CC_OP_LOGICB + ot;
        break;
    case OP_CMPL:
        gen_op_cmpl_T0_T1_cc();
        s1->cc_op = CC_OP_SUBB + ot;
        break;
    }
}
/* if d == OR_TMP0, it means memory operand (address in A0) */
static void gen_inc(DisasContext *s1, int ot, int d, int c)
{
    if (d != OR_TMP0)
        gen_op_mov_TN_reg(ot, 0, d);
    else
        gen_op_ld_T0_A0(ot + s1->mem_index);
    if (s1->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s1->cc_op);
    if (c > 0) {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], 1);
        s1->cc_op = CC_OP_INCB + ot;
    } else {
        tcg_gen_addi_tl(cpu_T[0], cpu_T[0], -1);
        s1->cc_op = CC_OP_DECB + ot;
    }
    if (d != OR_TMP0)
        gen_op_mov_reg_T0(ot, d);
    else
        gen_op_st_T0_A0(ot + s1->mem_index);
    gen_compute_eflags_c(cpu_cc_src);
    tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
}
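/* x86 leaves EFLAGS unchanged when the masked shift count is zero, so the
   variable-count shifts below compute the would-be flag operands into
   temporaries and commit them to cc_src/cc_dst behind a run-time branch
   that is skipped for a zero count; cc_op must then be treated as
   CC_OP_DYNAMIC for the rest of the block. */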
static void gen_shift_rm_T1(DisasContext *s, int ot, int op1,
                            int is_right, int is_arith)
{
    target_ulong mask;
    int shift_label;
    TCGv t0, t1;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    tcg_gen_andi_tl(cpu_T[1], cpu_T[1], mask);

    tcg_gen_addi_tl(cpu_tmp5, cpu_T[1], -1);

    if (is_right) {
        if (is_arith) {
            gen_exts(ot, cpu_T[0]);
            tcg_gen_sar_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_sar_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        } else {
            gen_extu(ot, cpu_T[0]);
            tcg_gen_shr_tl(cpu_T3, cpu_T[0], cpu_tmp5);
            tcg_gen_shr_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
        }
    } else {
        tcg_gen_shl_tl(cpu_T3, cpu_T[0], cpu_tmp5);
        tcg_gen_shl_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* XXX: inefficient */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();

    tcg_gen_mov_tl(t0, cpu_T[0]);
    tcg_gen_mov_tl(t1, cpu_T3);

    shift_label = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_T[1], 0, shift_label);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right)
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    else
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);

    gen_set_label(shift_label);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
}
static void gen_shift_rm_im(DisasContext *s, int ot, int op1, int op2,
                            int is_right, int is_arith)
{
    int mask;

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    op2 &= mask;
    if (op2 != 0) {
        if (is_right) {
            if (is_arith) {
                gen_exts(ot, cpu_T[0]);
                tcg_gen_sari_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_sari_tl(cpu_T[0], cpu_T[0], op2);
            } else {
                gen_extu(ot, cpu_T[0]);
                tcg_gen_shri_tl(cpu_tmp4, cpu_T[0], op2 - 1);
                tcg_gen_shri_tl(cpu_T[0], cpu_T[0], op2);
            }
        } else {
            tcg_gen_shli_tl(cpu_tmp4, cpu_T[0], op2 - 1);
            tcg_gen_shli_tl(cpu_T[0], cpu_T[0], op2);
        }
    }

    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags if non zero shift */
    if (op2 != 0) {
        tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
        tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
        if (is_right)
            s->cc_op = CC_OP_SARB + ot;
        else
            s->cc_op = CC_OP_SHLB + ot;
    }
}
static inline void tcg_gen_lshift(TCGv ret, TCGv arg1, target_long arg2)
{
    if (arg2 >= 0)
        tcg_gen_shli_tl(ret, arg1, arg2);
    else
        tcg_gen_shri_tl(ret, arg1, -arg2);
}
/* XXX: add faster immediate case */
static void gen_rot_rm_T1(DisasContext *s, int ot, int op1,
                          int is_right)
{
    target_ulong mask;
    int label1, label2, data_bits;
    TCGv t0, t1, t2, a0;

    /* XXX: inefficient, but we must use local temps */
    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_mov_tl(t1, cpu_T[1]);

    tcg_gen_andi_tl(t1, t1, mask);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label1);

    if (ot <= OT_WORD)
        tcg_gen_andi_tl(cpu_tmp0, t1, (1 << (3 + ot)) - 1);
    else
        tcg_gen_mov_tl(cpu_tmp0, t1);

    gen_extu(ot, t0);
    tcg_gen_mov_tl(t2, t0);

    data_bits = 8 << ot;
    /* XXX: rely on behaviour of shifts when operand 2 overflows (XXX:
       fix TCG definition) */
    if (is_right) {
        tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shl_tl(t0, t0, cpu_tmp0);
    } else {
        tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp0);
        tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(data_bits), cpu_tmp0);
        tcg_gen_shr_tl(t0, t0, cpu_tmp0);
    }
    tcg_gen_or_tl(t0, t0, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t1, 0, label2);

    gen_compute_eflags(cpu_cc_src);
    tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~(CC_O | CC_C));
    tcg_gen_xor_tl(cpu_tmp0, t2, t0);
    tcg_gen_lshift(cpu_tmp0, cpu_tmp0, 11 - (data_bits - 1));
    tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_O);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_tmp0);
    if (is_right) {
        tcg_gen_shri_tl(t0, t0, data_bits - 1);
    }
    tcg_gen_andi_tl(t0, t0, CC_C);
    tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t0);

    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
/* XXX: add faster immediate = 1 case */
static void gen_rotc_rm_T1(DisasContext *s, int ot, int op1,
                           int is_right)
{
    int label1;

    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    /* load */
    if (op1 == OR_TMP0)
        gen_op_ld_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_TN_reg(ot, 0, op1);

    if (is_right) {
        switch (ot) {
        case 0: gen_helper_rcrb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rcrw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcrl(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rcrq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    } else {
        switch (ot) {
        case 0: gen_helper_rclb(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 1: gen_helper_rclw(cpu_T[0], cpu_T[0], cpu_T[1]); break;
        case 2: gen_helper_rcll(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#ifdef TARGET_X86_64
        case 3: gen_helper_rclq(cpu_T[0], cpu_T[0], cpu_T[1]); break;
#endif
        }
    }
    /* store */
    if (op1 == OR_TMP0)
        gen_op_st_T0_A0(ot + s->mem_index);
    else
        gen_op_mov_reg_T0(ot, op1);

    /* update eflags */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_cc_tmp, -1, label1);

    tcg_gen_mov_tl(cpu_cc_src, cpu_cc_tmp);
    tcg_gen_discard_tl(cpu_cc_dst);
    tcg_gen_movi_i32(cpu_cc_op, CC_OP_EFLAGS);

    gen_set_label(label1);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */
}
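/* SHLD/SHRD double shifts: shift t0 by the count held in T3 while
   feeding in bits from T1.  For 16-bit operands the two values are first
   concatenated into 32 bits so that the documented Intel behaviour for
   counts above 16 falls out of the wide shift. */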
/* XXX: add faster immediate case */
static void gen_shiftd_rm_T1_T3(DisasContext *s, int ot, int op1,
                                int is_right)
{
    int label1, label2, data_bits;
    target_ulong mask;
    TCGv t0, t1, t2, a0;

    t0 = tcg_temp_local_new();
    t1 = tcg_temp_local_new();
    t2 = tcg_temp_local_new();
    a0 = tcg_temp_local_new();

    if (ot == OT_QUAD)
        mask = 0x3f;
    else
        mask = 0x1f;

    /* load */
    if (op1 == OR_TMP0) {
        tcg_gen_mov_tl(a0, cpu_A0);
        gen_op_ld_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_v_reg(ot, t0, op1);
    }

    tcg_gen_andi_tl(cpu_T3, cpu_T3, mask);

    tcg_gen_mov_tl(t1, cpu_T[1]);
    tcg_gen_mov_tl(t2, cpu_T3);

    /* Must test zero case to avoid using undefined behaviour in TCG
       shifts. */
    label1 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);

    tcg_gen_addi_tl(cpu_tmp5, t2, -1);
    if (ot == OT_WORD) {
        /* Note: we implement the Intel behaviour for shift count > 16 */
        if (is_right) {
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(cpu_tmp0, t1, 16);
            tcg_gen_or_tl(t0, t0, cpu_tmp0);
            tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            /* only needed if count > 16, but a test would complicate */
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shl_tl(cpu_tmp0, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);

            tcg_gen_or_tl(t0, t0, cpu_tmp0);
        } else {
            /* XXX: not optimal */
            tcg_gen_andi_tl(t0, t0, 0xffff);
            tcg_gen_shli_tl(t1, t1, 16);
            tcg_gen_or_tl(t1, t1, t0);
            tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);
            tcg_gen_sub_tl(cpu_tmp0, tcg_const_tl(32), cpu_tmp5);
            tcg_gen_shr_tl(cpu_tmp6, t1, cpu_tmp0);
            tcg_gen_or_tl(cpu_tmp4, cpu_tmp4, cpu_tmp6);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(32), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    } else {
        data_bits = 8 << ot;
        if (is_right) {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t0, t0);

            tcg_gen_shr_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shr_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shl_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);

        } else {
            if (ot == OT_LONG)
                tcg_gen_ext32u_tl(t1, t1);

            tcg_gen_shl_tl(cpu_tmp4, t0, cpu_tmp5);

            tcg_gen_shl_tl(t0, t0, t2);
            tcg_gen_sub_tl(cpu_tmp5, tcg_const_tl(data_bits), t2);
            tcg_gen_shr_tl(t1, t1, cpu_tmp5);
            tcg_gen_or_tl(t0, t0, t1);
        }
    }
    tcg_gen_mov_tl(t1, cpu_tmp4);

    gen_set_label(label1);
    /* store */
    if (op1 == OR_TMP0) {
        gen_op_st_v(ot + s->mem_index, t0, a0);
    } else {
        gen_op_mov_reg_v(ot, op1, t0);
    }

    /* update eflags */
    if (s->cc_op != CC_OP_DYNAMIC)
        gen_op_set_cc_op(s->cc_op);

    label2 = gen_new_label();
    tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label2);

    tcg_gen_mov_tl(cpu_cc_src, t1);
    tcg_gen_mov_tl(cpu_cc_dst, t0);
    if (is_right) {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SARB + ot);
    } else {
        tcg_gen_movi_i32(cpu_cc_op, CC_OP_SHLB + ot);
    }
    gen_set_label(label2);
    s->cc_op = CC_OP_DYNAMIC; /* cannot predict flags after */

    tcg_temp_free(t0);
    tcg_temp_free(t1);
    tcg_temp_free(t2);
    tcg_temp_free(a0);
}
static void gen_shift(DisasContext *s1, int op, int ot, int d, int s)
{
    if (s != OR_TMP1)
        gen_op_mov_TN_reg(ot, 1, s);
    switch(op) {
    case OP_ROL:
        gen_rot_rm_T1(s1, ot, d, 0);
        break;
    case OP_ROR:
        gen_rot_rm_T1(s1, ot, d, 1);
        break;
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_T1(s1, ot, d, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_T1(s1, ot, d, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_T1(s1, ot, d, 1, 1);
        break;
    case OP_RCL:
        gen_rotc_rm_T1(s1, ot, d, 0);
        break;
    case OP_RCR:
        gen_rotc_rm_T1(s1, ot, d, 1);
        break;
    }
}
static void gen_shifti(DisasContext *s1, int op, int ot, int d, int c)
{
    switch(op) {
    case OP_SHL:
    case OP_SHL1:
        gen_shift_rm_im(s1, ot, d, c, 0, 0);
        break;
    case OP_SHR:
        gen_shift_rm_im(s1, ot, d, c, 1, 0);
        break;
    case OP_SAR:
        gen_shift_rm_im(s1, ot, d, c, 1, 1);
        break;
    default:
        /* currently not optimized */
        gen_op_movl_T1_im(c);
        gen_shift(s1, op, ot, d, OR_TMP1);
        break;
    }
}
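/* Decode a ModRM byte (plus optional SIB byte and displacement) into an
   effective address in A0, adding the index*scale term and the segment
   base where required.  The 32/64-bit path and the legacy 16-bit path
   (fixed register pairs such as BX+SI) are handled separately. */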
static void gen_lea_modrm(DisasContext *s, int modrm, int *reg_ptr, int *offset_ptr)
{
    target_long disp;
    int havesib;
    int base;
    int index;
    int scale;
    int opreg;
    int mod, rm, code, override, must_add_seg;

    override = s->override;
    must_add_seg = s->addseg;
    if (override >= 0)
        must_add_seg = 1;
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;

    if (s->aflag) {

        havesib = 0;
        base = rm;
        index = 0;
        scale = 0;

        if (base == 4) {
            havesib = 1;
            code = ldub_code(s->pc++);
            scale = (code >> 6) & 3;
            index = ((code >> 3) & 7) | REX_X(s);
            base = (code & 7);
        }
        base |= REX_B(s);

        switch (mod) {
        case 0:
            if ((base & 7) == 5) {
                base = -1;
                disp = (int32_t)ldl_code(s->pc);
                s->pc += 4;
                if (CODE64(s) && !havesib) {
                    disp += s->pc + s->rip_offset;
                }
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = ldl_code(s->pc);
            s->pc += 4;
            break;
        }

        if (base >= 0) {
            /* for correct popl handling with esp */
            if (base == 4 && s->popl_esp_hack)
                disp += s->popl_esp_hack;
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_reg(base);
                if (disp != 0) {
                    gen_op_addq_A0_im(disp);
                }
            } else
#endif
            {
                gen_op_movl_A0_reg(base);
                if (disp != 0)
                    gen_op_addl_A0_im(disp);
            }
        } else {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_movq_A0_im(disp);
            } else
#endif
            {
                gen_op_movl_A0_im(disp);
            }
        }
        /* XXX: index == 4 is always invalid */
        if (havesib && (index != 4 || scale != 0)) {
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_reg_sN(scale, index);
            } else
#endif
            {
                gen_op_addl_A0_reg_sN(scale, index);
            }
        }
        if (must_add_seg) {
            if (override < 0) {
                if (base == R_EBP || base == R_ESP)
                    override = R_SS;
                else
                    override = R_DS;
            }
#ifdef TARGET_X86_64
            if (s->aflag == 2) {
                gen_op_addq_A0_seg(override);
            } else
#endif
            {
                gen_op_addl_A0_seg(override);
            }
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                disp = lduw_code(s->pc);
                s->pc += 2;
                gen_op_movl_A0_im(disp);
                rm = 0; /* avoid SS override */
                goto no_rm;
            } else {
                disp = 0;
            }
            break;
        case 1:
            disp = (int8_t)ldub_code(s->pc++);
            break;
        default:
        case 2:
            disp = lduw_code(s->pc);
            s->pc += 2;
            break;
        }
        switch(rm) {
        case 0:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 1:
            gen_op_movl_A0_reg(R_EBX);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 2:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_ESI);
            break;
        case 3:
            gen_op_movl_A0_reg(R_EBP);
            gen_op_addl_A0_reg_sN(0, R_EDI);
            break;
        case 4:
            gen_op_movl_A0_reg(R_ESI);
            break;
        case 5:
            gen_op_movl_A0_reg(R_EDI);
            break;
        case 6:
            gen_op_movl_A0_reg(R_EBP);
            break;
        default:
        case 7:
            gen_op_movl_A0_reg(R_EBX);
            break;
        }
        if (disp != 0)
            gen_op_addl_A0_im(disp);
        gen_op_andl_A0_ffff();
    no_rm:
        if (must_add_seg) {
            if (override < 0) {
                if (rm == 2 || rm == 3 || rm == 6)
                    override = R_SS;
                else
                    override = R_DS;
            }
            gen_op_addl_A0_seg(override);
        }
    }

    opreg = OR_A0;
    disp = 0;
    *reg_ptr = opreg;
    *offset_ptr = disp;
}
static void gen_nop_modrm(DisasContext *s, int modrm)
{
    int mod, rm, base, code;

    mod = (modrm >> 6) & 3;
    if (mod == 3)
        return;
    rm = modrm & 7;

    if (s->aflag) {

        base = rm;

        if (base == 4) {
            code = ldub_code(s->pc++);
            base = (code & 7);
        }

        switch (mod) {
        case 0:
            if (base == 5) {
                s->pc += 4;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 4;
            break;
        }
    } else {
        switch (mod) {
        case 0:
            if (rm == 6) {
                s->pc += 2;
            }
            break;
        case 1:
            s->pc++;
            break;
        default:
        case 2:
            s->pc += 2;
            break;
        }
    }
}
/* used for LEA and MOV AX, mem */
static void gen_add_A0_ds_seg(DisasContext *s)
{
    int override, must_add_seg;
    must_add_seg = s->addseg;
    override = R_DS;
    if (s->override >= 0) {
        override = s->override;
        must_add_seg = 1;
    } else {
        override = R_DS;
    }
    if (must_add_seg) {
#ifdef TARGET_X86_64
        if (CODE64(s)) {
            gen_op_addq_A0_seg(override);
        } else
#endif
        {
            gen_op_addl_A0_seg(override);
        }
    }
}
/* generate modrm memory load or store of 'reg'. TMP0 is used if reg ==
   OR_TMP0 */
static void gen_ldst_modrm(DisasContext *s, int modrm, int ot, int reg, int is_store)
{
    int mod, rm, opreg, disp;

    mod = (modrm >> 6) & 3;
    rm = (modrm & 7) | REX_B(s);
    if (mod == 3) {
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_mov_reg_T0(ot, rm);
        } else {
            gen_op_mov_TN_reg(ot, 0, rm);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    } else {
        gen_lea_modrm(s, modrm, &opreg, &disp);
        if (is_store) {
            if (reg != OR_TMP0)
                gen_op_mov_TN_reg(ot, 0, reg);
            gen_op_st_T0_A0(ot + s->mem_index);
        } else {
            gen_op_ld_T0_A0(ot + s->mem_index);
            if (reg != OR_TMP0)
                gen_op_mov_reg_T0(ot, reg);
        }
    }
}
static inline uint32_t insn_get(DisasContext *s, int ot)
{
    uint32_t ret;

    switch(ot) {
    case OT_BYTE:
        ret = ldub_code(s->pc);
        s->pc++;
        break;
    case OT_WORD:
        ret = lduw_code(s->pc);
        s->pc += 2;
        break;
    default:
    case OT_LONG:
        ret = ldl_code(s->pc);
        s->pc += 4;
        break;
    }
    return ret;
}

static inline int insn_const_size(unsigned int ot)
{
    if (ot <= OT_LONG)
        return 1 << ot;
    else
        return 4;
}
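/* Direct block chaining: when the jump target lies on the same guest page
   as the current TB (or on the page of the last decoded byte, since a TB
   may span two pages), a patchable goto_tb is emitted so the blocks can
   be linked directly; otherwise EIP is stored and the block ends. */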
static inline void gen_goto_tb(DisasContext *s, int tb_num, target_ulong eip)
{
    TranslationBlock *tb;
    target_ulong pc;

    pc = s->cs_base + eip;
    tb = s->tb;
    /* NOTE: we handle the case where the TB spans two pages here */
    if ((pc & TARGET_PAGE_MASK) == (tb->pc & TARGET_PAGE_MASK) ||
        (pc & TARGET_PAGE_MASK) == ((s->pc - 1) & TARGET_PAGE_MASK))  {
        /* jump to same page: we can use a direct jump */
        tcg_gen_goto_tb(tb_num);
        gen_jmp_im(eip);
        tcg_gen_exit_tb((long)tb + tb_num);
    } else {
        /* jump to another page: currently not optimized */
        gen_jmp_im(eip);
        gen_eob(s);
    }
}
static inline void gen_jcc(DisasContext *s, int b,
                           target_ulong val, target_ulong next_eip)
{
    int l1, l2, cc_op;

    cc_op = s->cc_op;
    if (s->cc_op != CC_OP_DYNAMIC) {
        gen_op_set_cc_op(s->cc_op);
        s->cc_op = CC_OP_DYNAMIC;
    }
    if (s->jmp_opt) {
        l1 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_goto_tb(s, 0, next_eip);

        gen_set_label(l1);
        gen_goto_tb(s, 1, val);
        s->is_jmp = 3;
    } else {

        l1 = gen_new_label();
        l2 = gen_new_label();
        gen_jcc1(s, cc_op, b, l1);

        gen_jmp_im(next_eip);
        tcg_gen_br(l2);

        gen_set_label(l1);
        gen_jmp_im(val);
        gen_set_label(l2);
        gen_eob(s);
    }
}
static void gen_setcc(DisasContext *s, int b)
{
    int inv, jcc_op, l1;
    TCGv t0;

    if (is_fast_jcc_case(s, b)) {
        /* nominal case: we use a jump */
        /* XXX: make it faster by adding new instructions in TCG */
        t0 = tcg_temp_local_new();
        tcg_gen_movi_tl(t0, 0);
        l1 = gen_new_label();
        gen_jcc1(s, s->cc_op, b ^ 1, l1);
        tcg_gen_movi_tl(t0, 1);
        gen_set_label(l1);
        tcg_gen_mov_tl(cpu_T[0], t0);
        tcg_temp_free(t0);
    } else {
        /* slow case: it is more efficient not to generate a jump,
           although it is questionable whether this optimization is
           worth it */
        inv = b & 1;
        jcc_op = (b >> 1) & 7;
        gen_setcc_slow_T0(s, jcc_op);
        if (inv) {
            tcg_gen_xori_tl(cpu_T[0], cpu_T[0], 1);
        }
    }
}
static inline void gen_op_movl_T0_seg(int seg_reg)
{
    tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
                     offsetof(CPUX86State,segs[seg_reg].selector));
}

static inline void gen_op_movl_seg_T0_vm(int seg_reg)
{
    tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xffff);
    tcg_gen_st32_tl(cpu_T[0], cpu_env,
                    offsetof(CPUX86State,segs[seg_reg].selector));
    tcg_gen_shli_tl(cpu_T[0], cpu_T[0], 4);
    tcg_gen_st_tl(cpu_T[0], cpu_env,
                  offsetof(CPUX86State,segs[seg_reg].base));
}
/* move T0 to seg_reg and compute if the CPU state may change. Never
   call this function with seg_reg == R_CS */
static void gen_movl_seg_T0(DisasContext *s, int seg_reg, target_ulong cur_eip)
{
    if (s->pe && !s->vm86) {
        /* XXX: optimize by finding processor state dynamically */
        if (s->cc_op != CC_OP_DYNAMIC)
            gen_op_set_cc_op(s->cc_op);
        gen_jmp_im(cur_eip);
        tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
        gen_helper_load_seg(tcg_const_i32(seg_reg), cpu_tmp2_i32);
        /* abort translation because the addseg value may change or
           because ss32 may change. For R_SS, translation must always
           stop as a special handling must be done to disable hardware
           interrupts for the next instruction */
        if (seg_reg == R_SS || (s->code32 && seg_reg < R_FS))
            s->is_jmp = 3;
    } else {
        gen_op_movl_seg_T0_vm(seg_reg);
        if (seg_reg == R_SS)
            s->is_jmp = 3;
    }
}
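/* For SVM intercepts the presence of a REP prefix must be reported in the
   exit information; the value 8 returned below appears to correspond to
   the REP flag bit position expected by the svm_check helpers. */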
2328 static inline int svm_is_rep(int prefixes)
2330 return ((prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) ? 8 : 0);
2333 static inline void
2334 gen_svm_check_intercept_param(DisasContext *s, target_ulong pc_start,
2335 uint32_t type, uint64_t param)
2336 {
2337 /* no SVM activated; fast case */
2338 if (likely(!(s->flags & HF_SVMI_MASK)))
2339 return;
2340 if (s->cc_op != CC_OP_DYNAMIC)
2341 gen_op_set_cc_op(s->cc_op);
2342 gen_jmp_im(pc_start - s->cs_base);
2343 gen_helper_svm_check_intercept_param(tcg_const_i32(type),
2344 tcg_const_i64(param));
2345 }
2347 static inline void
2348 gen_svm_check_intercept(DisasContext *s, target_ulong pc_start, uint64_t type)
2349 {
2350 gen_svm_check_intercept_param(s, pc_start, type, 0);
2351 }
2353 static inline void gen_stack_update(DisasContext *s, int addend)
2354 {
2355 #ifdef TARGET_X86_64
2356 if (CODE64(s)) {
2357 gen_op_add_reg_im(2, R_ESP, addend);
2358 } else
2359 #endif
2360 if (s->ss32) {
2361 gen_op_add_reg_im(1, R_ESP, addend);
2362 } else {
2363 gen_op_add_reg_im(0, R_ESP, addend);
2364 }
2365 }
2367 /* generate a push. It depends on ss32, addseg and dflag */
2368 static void gen_push_T0(DisasContext *s)
2369 {
2370 #ifdef TARGET_X86_64
2371 if (CODE64(s)) {
2372 gen_op_movq_A0_reg(R_ESP);
2373 if (s->dflag) {
2374 gen_op_addq_A0_im(-8);
2375 gen_op_st_T0_A0(OT_QUAD + s->mem_index);
2376 } else {
2377 gen_op_addq_A0_im(-2);
2378 gen_op_st_T0_A0(OT_WORD + s->mem_index);
2379 }
2380 gen_op_mov_reg_A0(2, R_ESP);
2381 } else
2382 #endif
2383 {
2384 gen_op_movl_A0_reg(R_ESP);
2385 if (!s->dflag)
2386 gen_op_addl_A0_im(-2);
2387 else
2388 gen_op_addl_A0_im(-4);
2389 if (s->ss32) {
2390 if (s->addseg) {
2391 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2392 gen_op_addl_A0_seg(R_SS);
2393 }
2394 } else {
2395 gen_op_andl_A0_ffff();
2396 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2397 gen_op_addl_A0_seg(R_SS);
2398 }
2399 gen_op_st_T0_A0(s->dflag + 1 + s->mem_index);
2400 if (s->ss32 && !s->addseg)
2401 gen_op_mov_reg_A0(1, R_ESP);
2402 else
2403 gen_op_mov_reg_T1(s->ss32 + 1, R_ESP);
2404 }
2405 }
2407 /* generate a push. It depends on ss32, addseg and dflag */
2408 /* slower version for T1, only used for call Ev */
2409 static void gen_push_T1(DisasContext *s)
2410 {
2411 #ifdef TARGET_X86_64
2412 if (CODE64(s)) {
2413 gen_op_movq_A0_reg(R_ESP);
2414 if (s->dflag) {
2415 gen_op_addq_A0_im(-8);
2416 gen_op_st_T1_A0(OT_QUAD + s->mem_index);
2417 } else {
2418 gen_op_addq_A0_im(-2);
2419 gen_op_st_T1_A0(OT_WORD + s->mem_index);
2420 }
2421 gen_op_mov_reg_A0(2, R_ESP);
2422 } else
2423 #endif
2424 {
2425 gen_op_movl_A0_reg(R_ESP);
2426 if (!s->dflag)
2427 gen_op_addl_A0_im(-2);
2428 else
2429 gen_op_addl_A0_im(-4);
2430 if (s->ss32) {
2431 if (s->addseg) {
2432 gen_op_addl_A0_seg(R_SS);
2433 }
2434 } else {
2435 gen_op_andl_A0_ffff();
2436 gen_op_addl_A0_seg(R_SS);
2437 }
2438 gen_op_st_T1_A0(s->dflag + 1 + s->mem_index);
2440 if (s->ss32 && !s->addseg)
2441 gen_op_mov_reg_A0(1, R_ESP);
2442 else
2443 gen_stack_update(s, (-2) << s->dflag);
2444 }
2445 }
2447 /* two step pop is necessary for precise exceptions */
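/* (gen_pop_T0 performs the possibly-faulting memory load first;
   gen_pop_update adjusts ESP only afterwards, so a page fault leaves ESP
   unchanged and the instruction can be restarted) */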
2448 static void gen_pop_T0(DisasContext *s)
2449 {
2450 #ifdef TARGET_X86_64
2451 if (CODE64(s)) {
2452 gen_op_movq_A0_reg(R_ESP);
2453 gen_op_ld_T0_A0((s->dflag ? OT_QUAD : OT_WORD) + s->mem_index);
2454 } else
2455 #endif
2456 {
2457 gen_op_movl_A0_reg(R_ESP);
2458 if (s->ss32) {
2459 if (s->addseg)
2460 gen_op_addl_A0_seg(R_SS);
2461 } else {
2462 gen_op_andl_A0_ffff();
2463 gen_op_addl_A0_seg(R_SS);
2464 }
2465 gen_op_ld_T0_A0(s->dflag + 1 + s->mem_index);
2466 }
2467 }
2469 static void gen_pop_update(DisasContext *s)
2470 {
2471 #ifdef TARGET_X86_64
2472 if (CODE64(s) && s->dflag) {
2473 gen_stack_update(s, 8);
2474 } else
2475 #endif
2476 {
2477 gen_stack_update(s, 2 << s->dflag);
2478 }
2479 }
2481 static void gen_stack_A0(DisasContext *s)
2482 {
2483 gen_op_movl_A0_reg(R_ESP);
2484 if (!s->ss32)
2485 gen_op_andl_A0_ffff();
2486 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2487 if (s->addseg)
2488 gen_op_addl_A0_seg(R_SS);
2489 }
2491 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2492 static void gen_pusha(DisasContext *s)
2493 {
2494 int i;
2495 gen_op_movl_A0_reg(R_ESP);
2496 gen_op_addl_A0_im(-16 << s->dflag);
2497 if (!s->ss32)
2498 gen_op_andl_A0_ffff();
2499 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2500 if (s->addseg)
2501 gen_op_addl_A0_seg(R_SS);
2502 for(i = 0;i < 8; i++) {
2503 gen_op_mov_TN_reg(OT_LONG, 0, 7 - i);
2504 gen_op_st_T0_A0(OT_WORD + s->dflag + s->mem_index);
2505 gen_op_addl_A0_im(2 << s->dflag);
2506 }
2507 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2508 }
2510 /* NOTE: wrap-around in 16-bit mode is not fully handled */
2511 static void gen_popa(DisasContext *s)
2512 {
2513 int i;
2514 gen_op_movl_A0_reg(R_ESP);
2515 if (!s->ss32)
2516 gen_op_andl_A0_ffff();
2517 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2518 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], 16 << s->dflag);
2519 if (s->addseg)
2520 gen_op_addl_A0_seg(R_SS);
2521 for(i = 0;i < 8; i++) {
2522 /* ESP is not reloaded */
2523 if (i != 3) {
2524 gen_op_ld_T0_A0(OT_WORD + s->dflag + s->mem_index);
2525 gen_op_mov_reg_T0(OT_WORD + s->dflag, 7 - i);
2526 }
2527 gen_op_addl_A0_im(2 << s->dflag);
2528 }
2529 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2530 }
2532 static void gen_enter(DisasContext *s, int esp_addend, int level)
2533 {
2534 int ot, opsize;
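/* ENTER only uses the low 5 bits of the nesting level (level mod 32) */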
2536 level &= 0x1f;
2537 #ifdef TARGET_X86_64
2538 if (CODE64(s)) {
2539 ot = s->dflag ? OT_QUAD : OT_WORD;
2540 opsize = 1 << ot;
2542 gen_op_movl_A0_reg(R_ESP);
2543 gen_op_addq_A0_im(-opsize);
2544 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2546 /* push bp */
2547 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2548 gen_op_st_T0_A0(ot + s->mem_index);
2549 if (level) {
2550 /* XXX: must save state */
2551 gen_helper_enter64_level(tcg_const_i32(level),
2552 tcg_const_i32((ot == OT_QUAD)),
2553 cpu_T[1]);
2554 }
2555 gen_op_mov_reg_T1(ot, R_EBP);
2556 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2557 gen_op_mov_reg_T1(OT_QUAD, R_ESP);
2558 } else
2559 #endif
2560 {
2561 ot = s->dflag + OT_WORD;
2562 opsize = 2 << s->dflag;
2564 gen_op_movl_A0_reg(R_ESP);
2565 gen_op_addl_A0_im(-opsize);
2566 if (!s->ss32)
2567 gen_op_andl_A0_ffff();
2568 tcg_gen_mov_tl(cpu_T[1], cpu_A0);
2569 if (s->addseg)
2570 gen_op_addl_A0_seg(R_SS);
2571 /* push bp */
2572 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
2573 gen_op_st_T0_A0(ot + s->mem_index);
2574 if (level) {
2575 /* XXX: must save state */
2576 gen_helper_enter_level(tcg_const_i32(level),
2577 tcg_const_i32(s->dflag),
2578 cpu_T[1]);
2579 }
2580 gen_op_mov_reg_T1(ot, R_EBP);
2581 tcg_gen_addi_tl(cpu_T[1], cpu_T[1], -esp_addend + (-opsize * level));
2582 gen_op_mov_reg_T1(OT_WORD + s->ss32, R_ESP);
2583 }
2584 }
2586 static void gen_exception(DisasContext *s, int trapno, target_ulong cur_eip)
2587 {
2588 if (s->cc_op != CC_OP_DYNAMIC)
2589 gen_op_set_cc_op(s->cc_op);
2590 gen_jmp_im(cur_eip);
2591 gen_helper_raise_exception(tcg_const_i32(trapno));
2592 s->is_jmp = 3;
2593 }
2595 /* an interrupt is different from an exception because of the
2596 privilege checks */
2597 static void gen_interrupt(DisasContext *s, int intno,
2598 target_ulong cur_eip, target_ulong next_eip)
2599 {
2600 if (s->cc_op != CC_OP_DYNAMIC)
2601 gen_op_set_cc_op(s->cc_op);
2602 gen_jmp_im(cur_eip);
2603 gen_helper_raise_interrupt(tcg_const_i32(intno),
2604 tcg_const_i32(next_eip - cur_eip));
2605 s->is_jmp = 3;
2606 }
2608 static void gen_debug(DisasContext *s, target_ulong cur_eip)
2609 {
2610 if (s->cc_op != CC_OP_DYNAMIC)
2611 gen_op_set_cc_op(s->cc_op);
2612 gen_jmp_im(cur_eip);
2613 gen_helper_debug();
2614 s->is_jmp = 3;
2615 }
2617 /* generate a generic end of block. Trace exception is also generated
2618 if needed */
2619 static void gen_eob(DisasContext *s)
2620 {
2621 if (s->cc_op != CC_OP_DYNAMIC)
2622 gen_op_set_cc_op(s->cc_op);
2623 if (s->tb->flags & HF_INHIBIT_IRQ_MASK) {
2624 gen_helper_reset_inhibit_irq();
2625 }
2626 if (s->singlestep_enabled) {
2627 gen_helper_debug();
2628 } else if (s->tf) {
2629 gen_helper_single_step();
2630 } else {
2631 tcg_gen_exit_tb(0);
2632 }
2633 s->is_jmp = 3;
2634 }
2636 /* generate a jump to eip. No segment change must happen before, as a
2637 direct call to the next block may occur */
2638 static void gen_jmp_tb(DisasContext *s, target_ulong eip, int tb_num)
2639 {
2640 if (s->jmp_opt) {
2641 if (s->cc_op != CC_OP_DYNAMIC) {
2642 gen_op_set_cc_op(s->cc_op);
2643 s->cc_op = CC_OP_DYNAMIC;
2644 }
2645 gen_goto_tb(s, tb_num, eip);
2646 s->is_jmp = 3;
2647 } else {
2648 gen_jmp_im(eip);
2649 gen_eob(s);
2650 }
2651 }
2653 static void gen_jmp(DisasContext *s, target_ulong eip)
2654 {
2655 gen_jmp_tb(s, eip, 0);
2656 }
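/* MMX/SSE operand load/store helpers. s->mem_index is kept pre-shifted
   (a multiple of 4) so that other helpers can add it to the 2-bit OT_*
   size codes; (idx >> 2) - 1 recovers the raw MMU index expected by
   tcg_gen_qemu_ld/st (the -1 reflects how mem_index is encoded at
   translation setup, as far as can be told from this file). The 128-bit
   "o" (octa) variants are split into two 64-bit accesses because TCG
   has no 128-bit memory operation. */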
2658 static inline void gen_ldq_env_A0(int idx, int offset)
2659 {
2660 int mem_index = (idx >> 2) - 1;
2661 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2662 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset);
2663 }
2665 static inline void gen_stq_env_A0(int idx, int offset)
2666 {
2667 int mem_index = (idx >> 2) - 1;
2668 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset);
2669 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2670 }
2672 static inline void gen_ldo_env_A0(int idx, int offset)
2673 {
2674 int mem_index = (idx >> 2) - 1;
2675 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0, mem_index);
2676 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2677 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2678 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2679 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2680 }
2682 static inline void gen_sto_env_A0(int idx, int offset)
2683 {
2684 int mem_index = (idx >> 2) - 1;
2685 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(0)));
2686 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0, mem_index);
2687 tcg_gen_addi_tl(cpu_tmp0, cpu_A0, 8);
2688 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, offset + offsetof(XMMReg, XMM_Q(1)));
2689 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_tmp0, mem_index);
2690 }
2692 static inline void gen_op_movo(int d_offset, int s_offset)
2693 {
2694 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2695 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2696 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset + 8);
2697 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset + 8);
2698 }
2700 static inline void gen_op_movq(int d_offset, int s_offset)
2701 {
2702 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env, s_offset);
2703 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2704 }
2706 static inline void gen_op_movl(int d_offset, int s_offset)
2707 {
2708 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env, s_offset);
2709 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, d_offset);
2710 }
2712 static inline void gen_op_movq_env_0(int d_offset)
2713 {
2714 tcg_gen_movi_i64(cpu_tmp1_i64, 0);
2715 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env, d_offset);
2716 }
2718 #define SSE_SPECIAL ((void *)1)
2719 #define SSE_DUMMY ((void *)2)
2721 #define MMX_OP2(x) { gen_helper_ ## x ## _mmx, gen_helper_ ## x ## _xmm }
2722 #define SSE_FOP(x) { gen_helper_ ## x ## ps, gen_helper_ ## x ## pd, \
2723 gen_helper_ ## x ## ss, gen_helper_ ## x ## sd, }
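/* sse_op_table1 is indexed as [opcode][prefix]: column 0 = no prefix,
   1 = 0x66, 2 = 0xF3, 3 = 0xF2 (the b1 value computed in gen_sse), so
   SSE_FOP lists the ps, pd, ss, sd variants in that order; e.g. 0x66 0x0F
   0x58 (addpd) resolves to sse_op_table1[0x58][1] == gen_helper_addpd. */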
2725 static void *sse_op_table1[256][4] = {
2726 /* 3DNow! extensions */
2727 [0x0e] = { SSE_DUMMY }, /* femms */
2728 [0x0f] = { SSE_DUMMY }, /* pf... */
2729 /* pure SSE operations */
2730 [0x10] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2731 [0x11] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movups, movupd, movss, movsd */
2732 [0x12] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd, movsldup, movddup */
2733 [0x13] = { SSE_SPECIAL, SSE_SPECIAL }, /* movlps, movlpd */
2734 [0x14] = { gen_helper_punpckldq_xmm, gen_helper_punpcklqdq_xmm },
2735 [0x15] = { gen_helper_punpckhdq_xmm, gen_helper_punpckhqdq_xmm },
2736 [0x16] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd, movshdup */
2737 [0x17] = { SSE_SPECIAL, SSE_SPECIAL }, /* movhps, movhpd */
2739 [0x28] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2740 [0x29] = { SSE_SPECIAL, SSE_SPECIAL }, /* movaps, movapd */
2741 [0x2a] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtpi2ps, cvtpi2pd, cvtsi2ss, cvtsi2sd */
2742 [0x2b] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntps, movntpd */
2743 [0x2c] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvttps2pi, cvttpd2pi, cvttss2si, cvttsd2si */
2744 [0x2d] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* cvtps2pi, cvtpd2pi, cvtss2si, cvtsd2si */
2745 [0x2e] = { gen_helper_ucomiss, gen_helper_ucomisd },
2746 [0x2f] = { gen_helper_comiss, gen_helper_comisd },
2747 [0x50] = { SSE_SPECIAL, SSE_SPECIAL }, /* movmskps, movmskpd */
2748 [0x51] = SSE_FOP(sqrt),
2749 [0x52] = { gen_helper_rsqrtps, NULL, gen_helper_rsqrtss, NULL },
2750 [0x53] = { gen_helper_rcpps, NULL, gen_helper_rcpss, NULL },
2751 [0x54] = { gen_helper_pand_xmm, gen_helper_pand_xmm }, /* andps, andpd */
2752 [0x55] = { gen_helper_pandn_xmm, gen_helper_pandn_xmm }, /* andnps, andnpd */
2753 [0x56] = { gen_helper_por_xmm, gen_helper_por_xmm }, /* orps, orpd */
2754 [0x57] = { gen_helper_pxor_xmm, gen_helper_pxor_xmm }, /* xorps, xorpd */
2755 [0x58] = SSE_FOP(add),
2756 [0x59] = SSE_FOP(mul),
2757 [0x5a] = { gen_helper_cvtps2pd, gen_helper_cvtpd2ps,
2758 gen_helper_cvtss2sd, gen_helper_cvtsd2ss },
2759 [0x5b] = { gen_helper_cvtdq2ps, gen_helper_cvtps2dq, gen_helper_cvttps2dq },
2760 [0x5c] = SSE_FOP(sub),
2761 [0x5d] = SSE_FOP(min),
2762 [0x5e] = SSE_FOP(div),
2763 [0x5f] = SSE_FOP(max),
2765 [0xc2] = SSE_FOP(cmpeq),
2766 [0xc6] = { gen_helper_shufps, gen_helper_shufpd },
2768 [0x38] = { SSE_SPECIAL, SSE_SPECIAL, NULL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2769 [0x3a] = { SSE_SPECIAL, SSE_SPECIAL }, /* SSSE3/SSE4 */
2771 /* MMX ops and their SSE extensions */
2772 [0x60] = MMX_OP2(punpcklbw),
2773 [0x61] = MMX_OP2(punpcklwd),
2774 [0x62] = MMX_OP2(punpckldq),
2775 [0x63] = MMX_OP2(packsswb),
2776 [0x64] = MMX_OP2(pcmpgtb),
2777 [0x65] = MMX_OP2(pcmpgtw),
2778 [0x66] = MMX_OP2(pcmpgtl),
2779 [0x67] = MMX_OP2(packuswb),
2780 [0x68] = MMX_OP2(punpckhbw),
2781 [0x69] = MMX_OP2(punpckhwd),
2782 [0x6a] = MMX_OP2(punpckhdq),
2783 [0x6b] = MMX_OP2(packssdw),
2784 [0x6c] = { NULL, gen_helper_punpcklqdq_xmm },
2785 [0x6d] = { NULL, gen_helper_punpckhqdq_xmm },
2786 [0x6e] = { SSE_SPECIAL, SSE_SPECIAL }, /* movd mm, ea */
2787 [0x6f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2788 [0x70] = { gen_helper_pshufw_mmx,
2789 gen_helper_pshufd_xmm,
2790 gen_helper_pshufhw_xmm,
2791 gen_helper_pshuflw_xmm },
2792 [0x71] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftw */
2793 [0x72] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftd */
2794 [0x73] = { SSE_SPECIAL, SSE_SPECIAL }, /* shiftq */
2795 [0x74] = MMX_OP2(pcmpeqb),
2796 [0x75] = MMX_OP2(pcmpeqw),
2797 [0x76] = MMX_OP2(pcmpeql),
2798 [0x77] = { SSE_DUMMY }, /* emms */
2799 [0x7c] = { NULL, gen_helper_haddpd, NULL, gen_helper_haddps },
2800 [0x7d] = { NULL, gen_helper_hsubpd, NULL, gen_helper_hsubps },
2801 [0x7e] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movd, movd, movq */
2802 [0x7f] = { SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL }, /* movq, movdqa, movdqu */
2803 [0xc4] = { SSE_SPECIAL, SSE_SPECIAL }, /* pinsrw */
2804 [0xc5] = { SSE_SPECIAL, SSE_SPECIAL }, /* pextrw */
2805 [0xd0] = { NULL, gen_helper_addsubpd, NULL, gen_helper_addsubps },
2806 [0xd1] = MMX_OP2(psrlw),
2807 [0xd2] = MMX_OP2(psrld),
2808 [0xd3] = MMX_OP2(psrlq),
2809 [0xd4] = MMX_OP2(paddq),
2810 [0xd5] = MMX_OP2(pmullw),
2811 [0xd6] = { NULL, SSE_SPECIAL, SSE_SPECIAL, SSE_SPECIAL },
2812 [0xd7] = { SSE_SPECIAL, SSE_SPECIAL }, /* pmovmskb */
2813 [0xd8] = MMX_OP2(psubusb),
2814 [0xd9] = MMX_OP2(psubusw),
2815 [0xda] = MMX_OP2(pminub),
2816 [0xdb] = MMX_OP2(pand),
2817 [0xdc] = MMX_OP2(paddusb),
2818 [0xdd] = MMX_OP2(paddusw),
2819 [0xde] = MMX_OP2(pmaxub),
2820 [0xdf] = MMX_OP2(pandn),
2821 [0xe0] = MMX_OP2(pavgb),
2822 [0xe1] = MMX_OP2(psraw),
2823 [0xe2] = MMX_OP2(psrad),
2824 [0xe3] = MMX_OP2(pavgw),
2825 [0xe4] = MMX_OP2(pmulhuw),
2826 [0xe5] = MMX_OP2(pmulhw),
2827 [0xe6] = { NULL, gen_helper_cvttpd2dq, gen_helper_cvtdq2pd, gen_helper_cvtpd2dq },
2828 [0xe7] = { SSE_SPECIAL, SSE_SPECIAL }, /* movntq, movntdq */
2829 [0xe8] = MMX_OP2(psubsb),
2830 [0xe9] = MMX_OP2(psubsw),
2831 [0xea] = MMX_OP2(pminsw),
2832 [0xeb] = MMX_OP2(por),
2833 [0xec] = MMX_OP2(paddsb),
2834 [0xed] = MMX_OP2(paddsw),
2835 [0xee] = MMX_OP2(pmaxsw),
2836 [0xef] = MMX_OP2(pxor),
2837 [0xf0] = { NULL, NULL, NULL, SSE_SPECIAL }, /* lddqu */
2838 [0xf1] = MMX_OP2(psllw),
2839 [0xf2] = MMX_OP2(pslld),
2840 [0xf3] = MMX_OP2(psllq),
2841 [0xf4] = MMX_OP2(pmuludq),
2842 [0xf5] = MMX_OP2(pmaddwd),
2843 [0xf6] = MMX_OP2(psadbw),
2844 [0xf7] = MMX_OP2(maskmov),
2845 [0xf8] = MMX_OP2(psubb),
2846 [0xf9] = MMX_OP2(psubw),
2847 [0xfa] = MMX_OP2(psubl),
2848 [0xfb] = MMX_OP2(psubq),
2849 [0xfc] = MMX_OP2(paddb),
2850 [0xfd] = MMX_OP2(paddw),
2851 [0xfe] = MMX_OP2(paddl),
2852 };
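/* sse_op_table2 (below) is indexed with ((b - 1) & 3) * 8 +
   ((modrm >> 3) & 7): one group of eight entries per immediate-shift
   opcode (0x71 word, 0x72 dword, 0x73 qword), with the modrm /r field
   selecting the operation (2 = srl, 4 = sra, 6 = sll; 3 and 7 are the
   xmm-only psrldq/pslldq byte shifts). */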
2854 static void *sse_op_table2[3 * 8][2] = {
2855 [0 + 2] = MMX_OP2(psrlw),
2856 [0 + 4] = MMX_OP2(psraw),
2857 [0 + 6] = MMX_OP2(psllw),
2858 [8 + 2] = MMX_OP2(psrld),
2859 [8 + 4] = MMX_OP2(psrad),
2860 [8 + 6] = MMX_OP2(pslld),
2861 [16 + 2] = MMX_OP2(psrlq),
2862 [16 + 3] = { NULL, gen_helper_psrldq_xmm },
2863 [16 + 6] = MMX_OP2(psllq),
2864 [16 + 7] = { NULL, gen_helper_pslldq_xmm },
2865 };
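/* sse_op_table3 (below) holds three groups of four converters:
   cvtsi2ss/sd plus their 64-bit cvtsq forms, then cvttss/sd2si, then
   cvtss/sd2si. Within a group the index is (dflag == 2) * 2 +
   ((b >> 8) - 2): REX.W selects the 64-bit form, the 0xF3/0xF2 prefix
   the ss or sd flavour. */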
2867 static void *sse_op_table3[4 * 3] = {
2868 gen_helper_cvtsi2ss,
2869 gen_helper_cvtsi2sd,
2870 X86_64_ONLY(gen_helper_cvtsq2ss),
2871 X86_64_ONLY(gen_helper_cvtsq2sd),
2873 gen_helper_cvttss2si,
2874 gen_helper_cvttsd2si,
2875 X86_64_ONLY(gen_helper_cvttss2sq),
2876 X86_64_ONLY(gen_helper_cvttsd2sq),
2878 gen_helper_cvtss2si,
2879 gen_helper_cvtsd2si,
2880 X86_64_ONLY(gen_helper_cvtss2sq),
2881 X86_64_ONLY(gen_helper_cvtsd2sq),
2882 };
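/* sse_op_table4 (below): the imm8 predicate of CMPPS/CMPPD/CMPSS/CMPSD
   (0 = eq .. 7 = ord) selects the row; the mandatory prefix selects the
   column, as in sse_op_table1. */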
2884 static void *sse_op_table4[8][4] = {
2885 SSE_FOP(cmpeq),
2886 SSE_FOP(cmplt),
2887 SSE_FOP(cmple),
2888 SSE_FOP(cmpunord),
2889 SSE_FOP(cmpneq),
2890 SSE_FOP(cmpnlt),
2891 SSE_FOP(cmpnle),
2892 SSE_FOP(cmpord),
2893 };
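/* sse_op_table5 (below): 3DNow! encodes its operation in a suffix byte
   that follows the modrm byte; that byte indexes the table directly
   (see case 0x0f in gen_sse). */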
2895 static void *sse_op_table5[256] = {
2896 [0x0c] = gen_helper_pi2fw,
2897 [0x0d] = gen_helper_pi2fd,
2898 [0x1c] = gen_helper_pf2iw,
2899 [0x1d] = gen_helper_pf2id,
2900 [0x8a] = gen_helper_pfnacc,
2901 [0x8e] = gen_helper_pfpnacc,
2902 [0x90] = gen_helper_pfcmpge,
2903 [0x94] = gen_helper_pfmin,
2904 [0x96] = gen_helper_pfrcp,
2905 [0x97] = gen_helper_pfrsqrt,
2906 [0x9a] = gen_helper_pfsub,
2907 [0x9e] = gen_helper_pfadd,
2908 [0xa0] = gen_helper_pfcmpgt,
2909 [0xa4] = gen_helper_pfmax,
2910 [0xa6] = gen_helper_movq, /* pfrcpit1; no need to actually increase precision */
2911 [0xa7] = gen_helper_movq, /* pfrsqit1 */
2912 [0xaa] = gen_helper_pfsubr,
2913 [0xae] = gen_helper_pfacc,
2914 [0xb0] = gen_helper_pfcmpeq,
2915 [0xb4] = gen_helper_pfmul,
2916 [0xb6] = gen_helper_movq, /* pfrcpit2 */
2917 [0xb7] = gen_helper_pmulhrw_mmx,
2918 [0xbb] = gen_helper_pswapd,
2919 [0xbf] = gen_helper_pavgb_mmx /* pavgusb */
2920 };
2922 struct sse_op_helper_s {
2923 void *op[2]; uint32_t ext_mask;
2924 };
2925 #define SSSE3_OP(x) { MMX_OP2(x), CPUID_EXT_SSSE3 }
2926 #define SSE41_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE41 }
2927 #define SSE42_OP(x) { { NULL, gen_helper_ ## x ## _xmm }, CPUID_EXT_SSE42 }
2928 #define SSE41_SPECIAL { { NULL, SSE_SPECIAL }, CPUID_EXT_SSE41 }
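/* sse_op_table6 and sse_op_table7 cover the 0x0F 0x38 and 0x0F 0x3A
   three-byte opcode spaces (SSSE3/SSE4.x); ext_mask gates each entry on
   the matching CPUID extended-feature bit. */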
2929 static struct sse_op_helper_s sse_op_table6[256] = {
2930 [0x00] = SSSE3_OP(pshufb),
2931 [0x01] = SSSE3_OP(phaddw),
2932 [0x02] = SSSE3_OP(phaddd),
2933 [0x03] = SSSE3_OP(phaddsw),
2934 [0x04] = SSSE3_OP(pmaddubsw),
2935 [0x05] = SSSE3_OP(phsubw),
2936 [0x06] = SSSE3_OP(phsubd),
2937 [0x07] = SSSE3_OP(phsubsw),
2938 [0x08] = SSSE3_OP(psignb),
2939 [0x09] = SSSE3_OP(psignw),
2940 [0x0a] = SSSE3_OP(psignd),
2941 [0x0b] = SSSE3_OP(pmulhrsw),
2942 [0x10] = SSE41_OP(pblendvb),
2943 [0x14] = SSE41_OP(blendvps),
2944 [0x15] = SSE41_OP(blendvpd),
2945 [0x17] = SSE41_OP(ptest),
2946 [0x1c] = SSSE3_OP(pabsb),
2947 [0x1d] = SSSE3_OP(pabsw),
2948 [0x1e] = SSSE3_OP(pabsd),
2949 [0x20] = SSE41_OP(pmovsxbw),
2950 [0x21] = SSE41_OP(pmovsxbd),
2951 [0x22] = SSE41_OP(pmovsxbq),
2952 [0x23] = SSE41_OP(pmovsxwd),
2953 [0x24] = SSE41_OP(pmovsxwq),
2954 [0x25] = SSE41_OP(pmovsxdq),
2955 [0x28] = SSE41_OP(pmuldq),
2956 [0x29] = SSE41_OP(pcmpeqq),
2957 [0x2a] = SSE41_SPECIAL, /* movntdqa */
2958 [0x2b] = SSE41_OP(packusdw),
2959 [0x30] = SSE41_OP(pmovzxbw),
2960 [0x31] = SSE41_OP(pmovzxbd),
2961 [0x32] = SSE41_OP(pmovzxbq),
2962 [0x33] = SSE41_OP(pmovzxwd),
2963 [0x34] = SSE41_OP(pmovzxwq),
2964 [0x35] = SSE41_OP(pmovzxdq),
2965 [0x37] = SSE42_OP(pcmpgtq),
2966 [0x38] = SSE41_OP(pminsb),
2967 [0x39] = SSE41_OP(pminsd),
2968 [0x3a] = SSE41_OP(pminuw),
2969 [0x3b] = SSE41_OP(pminud),
2970 [0x3c] = SSE41_OP(pmaxsb),
2971 [0x3d] = SSE41_OP(pmaxsd),
2972 [0x3e] = SSE41_OP(pmaxuw),
2973 [0x3f] = SSE41_OP(pmaxud),
2974 [0x40] = SSE41_OP(pmulld),
2975 [0x41] = SSE41_OP(phminposuw),
2976 };
2978 static struct sse_op_helper_s sse_op_table7[256] = {
2979 [0x08] = SSE41_OP(roundps),
2980 [0x09] = SSE41_OP(roundpd),
2981 [0x0a] = SSE41_OP(roundss),
2982 [0x0b] = SSE41_OP(roundsd),
2983 [0x0c] = SSE41_OP(blendps),
2984 [0x0d] = SSE41_OP(blendpd),
2985 [0x0e] = SSE41_OP(pblendw),
2986 [0x0f] = SSSE3_OP(palignr),
2987 [0x14] = SSE41_SPECIAL, /* pextrb */
2988 [0x15] = SSE41_SPECIAL, /* pextrw */
2989 [0x16] = SSE41_SPECIAL, /* pextrd/pextrq */
2990 [0x17] = SSE41_SPECIAL, /* extractps */
2991 [0x20] = SSE41_SPECIAL, /* pinsrb */
2992 [0x21] = SSE41_SPECIAL, /* insertps */
2993 [0x22] = SSE41_SPECIAL, /* pinsrd/pinsrq */
2994 [0x40] = SSE41_OP(dpps),
2995 [0x41] = SSE41_OP(dppd),
2996 [0x42] = SSE41_OP(mpsadbw),
2997 [0x60] = SSE42_OP(pcmpestrm),
2998 [0x61] = SSE42_OP(pcmpestri),
2999 [0x62] = SSE42_OP(pcmpistrm),
3000 [0x63] = SSE42_OP(pcmpistri),
3001 };
3003 static void gen_sse(DisasContext *s, int b, target_ulong pc_start, int rex_r)
3004 {
3005 int b1, op1_offset, op2_offset, is_xmm, val, ot;
3006 int modrm, mod, rm, reg, reg_addr, offset_addr;
3007 void *sse_op2;
3009 b &= 0xff;
3010 if (s->prefix & PREFIX_DATA)
3011 b1 = 1;
3012 else if (s->prefix & PREFIX_REPZ)
3013 b1 = 2;
3014 else if (s->prefix & PREFIX_REPNZ)
3015 b1 = 3;
3016 else
3017 b1 = 0;
3018 sse_op2 = sse_op_table1[b][b1];
3019 if (!sse_op2)
3020 goto illegal_op;
3021 if ((b <= 0x5f && b >= 0x10) || b == 0xc6 || b == 0xc2) {
3022 is_xmm = 1;
3023 } else {
3024 if (b1 == 0) {
3025 /* MMX case */
3026 is_xmm = 0;
3027 } else {
3028 is_xmm = 1;
3029 }
3030 }
3031 /* simple MMX/SSE operation */
3032 if (s->flags & HF_TS_MASK) {
3033 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
3034 return;
3035 }
3036 if (s->flags & HF_EM_MASK) {
3037 illegal_op:
3038 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
3039 return;
3040 }
3041 if (is_xmm && !(s->flags & HF_OSFXSR_MASK))
3042 if ((b != 0x38 && b != 0x3a) || (s->prefix & PREFIX_DATA))
3043 goto illegal_op;
3044 if (b == 0x0e) {
3045 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3046 goto illegal_op;
3047 /* femms */
3048 gen_helper_emms();
3049 return;
3050 }
3051 if (b == 0x77) {
3052 /* emms */
3053 gen_helper_emms();
3054 return;
3055 }
3056 /* prepare MMX state (XXX: optimize by storing fptt and fptags in
3057 the static cpu state) */
3058 if (!is_xmm) {
3059 gen_helper_enter_mmx();
3060 }
3062 modrm = ldub_code(s->pc++);
3063 reg = ((modrm >> 3) & 7);
3064 if (is_xmm)
3065 reg |= rex_r;
3066 mod = (modrm >> 6) & 3;
3067 if (sse_op2 == SSE_SPECIAL) {
3068 b |= (b1 << 8);
3069 switch(b) {
3070 case 0x0e7: /* movntq */
3071 if (mod == 3)
3072 goto illegal_op;
3073 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3074 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3075 break;
3076 case 0x1e7: /* movntdq */
3077 case 0x02b: /* movntps */
3078 case 0x12b: /* movntpd */
3079 case 0x3f0: /* lddqu */
3080 if (mod == 3)
3081 goto illegal_op;
3082 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3083 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3084 break;
3085 case 0x6e: /* movd mm, ea */
3086 #ifdef TARGET_X86_64
3087 if (s->dflag == 2) {
3088 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3089 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,fpregs[reg].mmx));
3090 } else
3091 #endif
3092 {
3093 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3094 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3095 offsetof(CPUX86State,fpregs[reg].mmx));
3096 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3097 gen_helper_movl_mm_T0_mmx(cpu_ptr0, cpu_tmp2_i32);
3098 }
3099 break;
3100 case 0x16e: /* movd xmm, ea */
3101 #ifdef TARGET_X86_64
3102 if (s->dflag == 2) {
3103 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 0);
3104 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3105 offsetof(CPUX86State,xmm_regs[reg]));
3106 gen_helper_movq_mm_T0_xmm(cpu_ptr0, cpu_T[0]);
3107 } else
3108 #endif
3109 {
3110 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 0);
3111 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3112 offsetof(CPUX86State,xmm_regs[reg]));
3113 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3114 gen_helper_movl_mm_T0_xmm(cpu_ptr0, cpu_tmp2_i32);
3115 }
3116 break;
3117 case 0x6f: /* movq mm, ea */
3118 if (mod != 3) {
3119 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3120 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3121 } else {
3122 rm = (modrm & 7);
3123 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3124 offsetof(CPUX86State,fpregs[rm].mmx));
3125 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3126 offsetof(CPUX86State,fpregs[reg].mmx));
3127 }
3128 break;
3129 case 0x010: /* movups */
3130 case 0x110: /* movupd */
3131 case 0x028: /* movaps */
3132 case 0x128: /* movapd */
3133 case 0x16f: /* movdqa xmm, ea */
3134 case 0x26f: /* movdqu xmm, ea */
3135 if (mod != 3) {
3136 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3137 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3138 } else {
3139 rm = (modrm & 7) | REX_B(s);
3140 gen_op_movo(offsetof(CPUX86State,xmm_regs[reg]),
3141 offsetof(CPUX86State,xmm_regs[rm]));
3142 }
3143 break;
3144 case 0x210: /* movss xmm, ea */
3145 if (mod != 3) {
3146 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3147 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3148 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3149 gen_op_movl_T0_0();
3150 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3151 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3152 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3153 } else {
3154 rm = (modrm & 7) | REX_B(s);
3155 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3156 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3157 }
3158 break;
3159 case 0x310: /* movsd xmm, ea */
3160 if (mod != 3) {
3161 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3162 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3163 gen_op_movl_T0_0();
3164 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3165 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3166 } else {
3167 rm = (modrm & 7) | REX_B(s);
3168 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3169 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3170 }
3171 break;
3172 case 0x012: /* movlps */
3173 case 0x112: /* movlpd */
3174 if (mod != 3) {
3175 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3176 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3177 } else {
3178 /* movhlps */
3179 rm = (modrm & 7) | REX_B(s);
3180 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3181 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3182 }
3183 break;
3184 case 0x212: /* movsldup */
3185 if (mod != 3) {
3186 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3187 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3188 } else {
3189 rm = (modrm & 7) | REX_B(s);
3190 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3191 offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)));
3192 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3193 offsetof(CPUX86State,xmm_regs[rm].XMM_L(2)));
3194 }
3195 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3196 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3197 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3198 offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)));
3199 break;
3200 case 0x312: /* movddup */
3201 if (mod != 3) {
3202 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3203 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3204 } else {
3205 rm = (modrm & 7) | REX_B(s);
3206 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3207 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3208 }
3209 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3210 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3211 break;
3212 case 0x016: /* movhps */
3213 case 0x116: /* movhpd */
3214 if (mod != 3) {
3215 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3216 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3217 } else {
3218 /* movlhps */
3219 rm = (modrm & 7) | REX_B(s);
3220 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)),
3221 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3222 }
3223 break;
3224 case 0x216: /* movshdup */
3225 if (mod != 3) {
3226 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3227 gen_ldo_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3228 } else {
3229 rm = (modrm & 7) | REX_B(s);
3230 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)),
3231 offsetof(CPUX86State,xmm_regs[rm].XMM_L(1)));
3232 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)),
3233 offsetof(CPUX86State,xmm_regs[rm].XMM_L(3)));
3234 }
3235 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)),
3236 offsetof(CPUX86State,xmm_regs[reg].XMM_L(1)));
3237 gen_op_movl(offsetof(CPUX86State,xmm_regs[reg].XMM_L(2)),
3238 offsetof(CPUX86State,xmm_regs[reg].XMM_L(3)));
3239 break;
3240 case 0x7e: /* movd ea, mm */
3241 #ifdef TARGET_X86_64
3242 if (s->dflag == 2) {
3243 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3244 offsetof(CPUX86State,fpregs[reg].mmx));
3245 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3246 } else
3247 #endif
3248 {
3249 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3250 offsetof(CPUX86State,fpregs[reg].mmx.MMX_L(0)));
3251 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3252 }
3253 break;
3254 case 0x17e: /* movd ea, xmm */
3255 #ifdef TARGET_X86_64
3256 if (s->dflag == 2) {
3257 tcg_gen_ld_i64(cpu_T[0], cpu_env,
3258 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3259 gen_ldst_modrm(s, modrm, OT_QUAD, OR_TMP0, 1);
3260 } else
3261 #endif
3262 {
3263 tcg_gen_ld32u_tl(cpu_T[0], cpu_env,
3264 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3265 gen_ldst_modrm(s, modrm, OT_LONG, OR_TMP0, 1);
3266 }
3267 break;
3268 case 0x27e: /* movq xmm, ea */
3269 if (mod != 3) {
3270 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3271 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3272 } else {
3273 rm = (modrm & 7) | REX_B(s);
3274 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3275 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3276 }
3277 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3278 break;
3279 case 0x7f: /* movq ea, mm */
3280 if (mod != 3) {
3281 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3282 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,fpregs[reg].mmx));
3283 } else {
3284 rm = (modrm & 7);
3285 gen_op_movq(offsetof(CPUX86State,fpregs[rm].mmx),
3286 offsetof(CPUX86State,fpregs[reg].mmx));
3287 }
3288 break;
3289 case 0x011: /* movups */
3290 case 0x111: /* movupd */
3291 case 0x029: /* movaps */
3292 case 0x129: /* movapd */
3293 case 0x17f: /* movdqa ea, xmm */
3294 case 0x27f: /* movdqu ea, xmm */
3295 if (mod != 3) {
3296 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3297 gen_sto_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg]));
3298 } else {
3299 rm = (modrm & 7) | REX_B(s);
3300 gen_op_movo(offsetof(CPUX86State,xmm_regs[rm]),
3301 offsetof(CPUX86State,xmm_regs[reg]));
3302 }
3303 break;
3304 case 0x211: /* movss ea, xmm */
3305 if (mod != 3) {
3306 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3307 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3308 gen_op_st_T0_A0(OT_LONG + s->mem_index);
3309 } else {
3310 rm = (modrm & 7) | REX_B(s);
3311 gen_op_movl(offsetof(CPUX86State,xmm_regs[rm].XMM_L(0)),
3312 offsetof(CPUX86State,xmm_regs[reg].XMM_L(0)));
3313 }
3314 break;
3315 case 0x311: /* movsd ea, xmm */
3316 if (mod != 3) {
3317 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3318 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3319 } else {
3320 rm = (modrm & 7) | REX_B(s);
3321 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3322 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3323 }
3324 break;
3325 case 0x013: /* movlps */
3326 case 0x113: /* movlpd */
3327 if (mod != 3) {
3328 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3329 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3330 } else {
3331 goto illegal_op;
3332 }
3333 break;
3334 case 0x017: /* movhps */
3335 case 0x117: /* movhpd */
3336 if (mod != 3) {
3337 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3338 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3339 } else {
3340 goto illegal_op;
3341 }
3342 break;
3343 case 0x71: /* shift mm, im */
3344 case 0x72:
3345 case 0x73:
3346 case 0x171: /* shift xmm, im */
3347 case 0x172:
3348 case 0x173:
3349 val = ldub_code(s->pc++);
3350 if (is_xmm) {
3351 gen_op_movl_T0_im(val);
3352 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3353 gen_op_movl_T0_0();
3354 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(1)));
3355 op1_offset = offsetof(CPUX86State,xmm_t0);
3356 } else {
3357 gen_op_movl_T0_im(val);
3358 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(0)));
3359 gen_op_movl_T0_0();
3360 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,mmx_t0.MMX_L(1)));
3361 op1_offset = offsetof(CPUX86State,mmx_t0);
3362 }
3363 sse_op2 = sse_op_table2[((b - 1) & 3) * 8 + (((modrm >> 3)) & 7)][b1];
3364 if (!sse_op2)
3365 goto illegal_op;
3366 if (is_xmm) {
3367 rm = (modrm & 7) | REX_B(s);
3368 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3369 } else {
3370 rm = (modrm & 7);
3371 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3372 }
3373 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3374 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op1_offset);
3375 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3376 break;
3377 case 0x050: /* movmskps */
3378 rm = (modrm & 7) | REX_B(s);
3379 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3380 offsetof(CPUX86State,xmm_regs[rm]));
3381 gen_helper_movmskps(cpu_tmp2_i32, cpu_ptr0);
3382 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3383 gen_op_mov_reg_T0(OT_LONG, reg);
3384 break;
3385 case 0x150: /* movmskpd */
3386 rm = (modrm & 7) | REX_B(s);
3387 tcg_gen_addi_ptr(cpu_ptr0, cpu_env,
3388 offsetof(CPUX86State,xmm_regs[rm]));
3389 gen_helper_movmskpd(cpu_tmp2_i32, cpu_ptr0);
3390 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3391 gen_op_mov_reg_T0(OT_LONG, reg);
3392 break;
3393 case 0x02a: /* cvtpi2ps */
3394 case 0x12a: /* cvtpi2pd */
3395 gen_helper_enter_mmx();
3396 if (mod != 3) {
3397 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3398 op2_offset = offsetof(CPUX86State,mmx_t0);
3399 gen_ldq_env_A0(s->mem_index, op2_offset);
3400 } else {
3401 rm = (modrm & 7);
3402 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3403 }
3404 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3405 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3406 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3407 switch(b >> 8) {
3408 case 0x0:
3409 gen_helper_cvtpi2ps(cpu_ptr0, cpu_ptr1);
3410 break;
3411 default:
3412 case 0x1:
3413 gen_helper_cvtpi2pd(cpu_ptr0, cpu_ptr1);
3414 break;
3415 }
3416 break;
3417 case 0x22a: /* cvtsi2ss */
3418 case 0x32a: /* cvtsi2sd */
3419 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3420 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
3421 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3422 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3423 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2)];
3424 if (ot == OT_LONG) {
3425 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3426 ((void (*)(TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_tmp2_i32);
3427 } else {
3428 ((void (*)(TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_T[0]);
3429 }
3430 break;
3431 case 0x02c: /* cvttps2pi */
3432 case 0x12c: /* cvttpd2pi */
3433 case 0x02d: /* cvtps2pi */
3434 case 0x12d: /* cvtpd2pi */
3435 gen_helper_enter_mmx();
3436 if (mod != 3) {
3437 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3438 op2_offset = offsetof(CPUX86State,xmm_t0);
3439 gen_ldo_env_A0(s->mem_index, op2_offset);
3440 } else {
3441 rm = (modrm & 7) | REX_B(s);
3442 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3443 }
3444 op1_offset = offsetof(CPUX86State,fpregs[reg & 7].mmx);
3445 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3446 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3447 switch(b) {
3448 case 0x02c:
3449 gen_helper_cvttps2pi(cpu_ptr0, cpu_ptr1);
3450 break;
3451 case 0x12c:
3452 gen_helper_cvttpd2pi(cpu_ptr0, cpu_ptr1);
3453 break;
3454 case 0x02d:
3455 gen_helper_cvtps2pi(cpu_ptr0, cpu_ptr1);
3456 break;
3457 case 0x12d:
3458 gen_helper_cvtpd2pi(cpu_ptr0, cpu_ptr1);
3459 break;
3460 }
3461 break;
3462 case 0x22c: /* cvttss2si */
3463 case 0x32c: /* cvttsd2si */
3464 case 0x22d: /* cvtss2si */
3465 case 0x32d: /* cvtsd2si */
3466 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3467 if (mod != 3) {
3468 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3469 if ((b >> 8) & 1) {
3470 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_Q(0)));
3471 } else {
3472 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3473 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3474 }
3475 op2_offset = offsetof(CPUX86State,xmm_t0);
3476 } else {
3477 rm = (modrm & 7) | REX_B(s);
3478 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3479 }
3480 sse_op2 = sse_op_table3[(s->dflag == 2) * 2 + ((b >> 8) - 2) + 4 +
3481 (b & 1) * 4];
3482 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op2_offset);
3483 if (ot == OT_LONG) {
3484 ((void (*)(TCGv_i32, TCGv_ptr))sse_op2)(cpu_tmp2_i32, cpu_ptr0);
3485 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3486 } else {
3487 ((void (*)(TCGv, TCGv_ptr))sse_op2)(cpu_T[0], cpu_ptr0);
3488 }
3489 gen_op_mov_reg_T0(ot, reg);
3490 break;
3491 case 0xc4: /* pinsrw */
3492 case 0x1c4:
3493 s->rip_offset = 1;
3494 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
3495 val = ldub_code(s->pc++);
3496 if (b1) {
3497 val &= 7;
3498 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3499 offsetof(CPUX86State,xmm_regs[reg].XMM_W(val)));
3500 } else {
3501 val &= 3;
3502 tcg_gen_st16_tl(cpu_T[0], cpu_env,
3503 offsetof(CPUX86State,fpregs[reg].mmx.MMX_W(val)));
3504 }
3505 break;
3506 case 0xc5: /* pextrw */
3507 case 0x1c5:
3508 if (mod != 3)
3509 goto illegal_op;
3510 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3511 val = ldub_code(s->pc++);
3512 if (b1) {
3513 val &= 7;
3514 rm = (modrm & 7) | REX_B(s);
3515 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3516 offsetof(CPUX86State,xmm_regs[rm].XMM_W(val)));
3517 } else {
3518 val &= 3;
3519 rm = (modrm & 7);
3520 tcg_gen_ld16u_tl(cpu_T[0], cpu_env,
3521 offsetof(CPUX86State,fpregs[rm].mmx.MMX_W(val)));
3522 }
3523 reg = ((modrm >> 3) & 7) | rex_r;
3524 gen_op_mov_reg_T0(ot, reg);
3525 break;
3526 case 0x1d6: /* movq ea, xmm */
3527 if (mod != 3) {
3528 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3529 gen_stq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3530 } else {
3531 rm = (modrm & 7) | REX_B(s);
3532 gen_op_movq(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)),
3533 offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)));
3534 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[rm].XMM_Q(1)));
3535 }
3536 break;
3537 case 0x2d6: /* movq2dq */
3538 gen_helper_enter_mmx();
3539 rm = (modrm & 7);
3540 gen_op_movq(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(0)),
3541 offsetof(CPUX86State,fpregs[rm].mmx));
3542 gen_op_movq_env_0(offsetof(CPUX86State,xmm_regs[reg].XMM_Q(1)));
3543 break;
3544 case 0x3d6: /* movdq2q */
3545 gen_helper_enter_mmx();
3546 rm = (modrm & 7) | REX_B(s);
3547 gen_op_movq(offsetof(CPUX86State,fpregs[reg & 7].mmx),
3548 offsetof(CPUX86State,xmm_regs[rm].XMM_Q(0)));
3549 break;
3550 case 0xd7: /* pmovmskb */
3551 case 0x1d7:
3552 if (mod != 3)
3553 goto illegal_op;
3554 if (b1) {
3555 rm = (modrm & 7) | REX_B(s);
3556 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,xmm_regs[rm]));
3557 gen_helper_pmovmskb_xmm(cpu_tmp2_i32, cpu_ptr0);
3558 } else {
3559 rm = (modrm & 7);
3560 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, offsetof(CPUX86State,fpregs[rm].mmx));
3561 gen_helper_pmovmskb_mmx(cpu_tmp2_i32, cpu_ptr0);
3562 }
3563 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3564 reg = ((modrm >> 3) & 7) | rex_r;
3565 gen_op_mov_reg_T0(OT_LONG, reg);
3566 break;
3567 case 0x138:
3568 if (s->prefix & PREFIX_REPNZ)
3569 goto crc32;
3570 case 0x038:
3571 b = modrm;
3572 modrm = ldub_code(s->pc++);
3573 rm = modrm & 7;
3574 reg = ((modrm >> 3) & 7) | rex_r;
3575 mod = (modrm >> 6) & 3;
3577 sse_op2 = sse_op_table6[b].op[b1];
3578 if (!sse_op2)
3579 goto illegal_op;
3580 if (!(s->cpuid_ext_features & sse_op_table6[b].ext_mask))
3581 goto illegal_op;
3583 if (b1) {
3584 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3585 if (mod == 3) {
3586 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3587 } else {
3588 op2_offset = offsetof(CPUX86State,xmm_t0);
3589 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3590 switch (b) {
3591 case 0x20: case 0x30: /* pmovsxbw, pmovzxbw */
3592 case 0x23: case 0x33: /* pmovsxwd, pmovzxwd */
3593 case 0x25: case 0x35: /* pmovsxdq, pmovzxdq */
3594 gen_ldq_env_A0(s->mem_index, op2_offset +
3595 offsetof(XMMReg, XMM_Q(0)));
3596 break;
3597 case 0x21: case 0x31: /* pmovsxbd, pmovzxbd */
3598 case 0x24: case 0x34: /* pmovsxwq, pmovzxwq */
3599 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3600 (s->mem_index >> 2) - 1);
3601 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3602 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, op2_offset +
3603 offsetof(XMMReg, XMM_L(0)));
3604 break;
3605 case 0x22: case 0x32: /* pmovsxbq, pmovzxbq */
3606 tcg_gen_qemu_ld16u(cpu_tmp0, cpu_A0,
3607 (s->mem_index >> 2) - 1);
3608 tcg_gen_st16_tl(cpu_tmp0, cpu_env, op2_offset +
3609 offsetof(XMMReg, XMM_W(0)));
3610 break;
3611 case 0x2a: /* movntdqa */
3612 gen_ldo_env_A0(s->mem_index, op1_offset);
3613 return;
3614 default:
3615 gen_ldo_env_A0(s->mem_index, op2_offset);
3616 }
3617 }
3618 } else {
3619 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3620 if (mod == 3) {
3621 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3622 } else {
3623 op2_offset = offsetof(CPUX86State,mmx_t0);
3624 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3625 gen_ldq_env_A0(s->mem_index, op2_offset);
3626 }
3627 }
3628 if (sse_op2 == SSE_SPECIAL)
3629 goto illegal_op;
3631 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3632 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3633 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
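/* 0x17 in the 0x38 space is ptest, which writes ZF/CF directly, so the
   flags are now valid in EFLAGS */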
3635 if (b == 0x17)
3636 s->cc_op = CC_OP_EFLAGS;
3637 break;
3638 case 0x338: /* crc32 */
3639 crc32:
3640 b = modrm;
3641 modrm = ldub_code(s->pc++);
3642 reg = ((modrm >> 3) & 7) | rex_r;
3644 if (b != 0xf0 && b != 0xf1)
3645 goto illegal_op;
3646 if (!(s->cpuid_ext_features & CPUID_EXT_SSE42))
3647 goto illegal_op;
3649 if (b == 0xf0)
3650 ot = OT_BYTE;
3651 else if (b == 0xf1 && s->dflag != 2)
3652 if (s->prefix & PREFIX_DATA)
3653 ot = OT_WORD;
3654 else
3655 ot = OT_LONG;
3656 else
3657 ot = OT_QUAD;
3659 gen_op_mov_TN_reg(OT_LONG, 0, reg);
3660 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
3661 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
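/* the last helper argument is the source operand width in bits
   (8 << ot) */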
3662 gen_helper_crc32(cpu_T[0], cpu_tmp2_i32,
3663 cpu_T[0], tcg_const_i32(8 << ot));
3665 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3666 gen_op_mov_reg_T0(ot, reg);
3667 break;
3668 case 0x03a:
3669 case 0x13a:
3670 b = modrm;
3671 modrm = ldub_code(s->pc++);
3672 rm = modrm & 7;
3673 reg = ((modrm >> 3) & 7) | rex_r;
3674 mod = (modrm >> 6) & 3;
3676 sse_op2 = sse_op_table7[b].op[b1];
3677 if (!sse_op2)
3678 goto illegal_op;
3679 if (!(s->cpuid_ext_features & sse_op_table7[b].ext_mask))
3680 goto illegal_op;
3682 if (sse_op2 == SSE_SPECIAL) {
3683 ot = (s->dflag == 2) ? OT_QUAD : OT_LONG;
3684 rm = (modrm & 7) | REX_B(s);
3685 if (mod != 3)
3686 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3687 reg = ((modrm >> 3) & 7) | rex_r;
3688 val = ldub_code(s->pc++);
3689 switch (b) {
3690 case 0x14: /* pextrb */
3691 tcg_gen_ld8u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3692 xmm_regs[reg].XMM_B(val & 15)));
3693 if (mod == 3)
3694 gen_op_mov_reg_T0(ot, rm);
3695 else
3696 tcg_gen_qemu_st8(cpu_T[0], cpu_A0,
3697 (s->mem_index >> 2) - 1);
3698 break;
3699 case 0x15: /* pextrw */
3700 tcg_gen_ld16u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3701 xmm_regs[reg].XMM_W(val & 7)));
3702 if (mod == 3)
3703 gen_op_mov_reg_T0(ot, rm);
3704 else
3705 tcg_gen_qemu_st16(cpu_T[0], cpu_A0,
3706 (s->mem_index >> 2) - 1);
3707 break;
3708 case 0x16:
3709 if (ot == OT_LONG) { /* pextrd */
3710 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3711 offsetof(CPUX86State,
3712 xmm_regs[reg].XMM_L(val & 3)));
3713 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
3714 if (mod == 3)
3715 gen_op_mov_reg_v(ot, rm, cpu_T[0]);
3716 else
3717 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3718 (s->mem_index >> 2) - 1);
3719 } else { /* pextrq */
3720 #ifdef TARGET_X86_64
3721 tcg_gen_ld_i64(cpu_tmp1_i64, cpu_env,
3722 offsetof(CPUX86State,
3723 xmm_regs[reg].XMM_Q(val & 1)));
3724 if (mod == 3)
3725 gen_op_mov_reg_v(ot, rm, cpu_tmp1_i64);
3726 else
3727 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
3728 (s->mem_index >> 2) - 1);
3729 #else
3730 goto illegal_op;
3731 #endif
3732 }
3733 break;
3734 case 0x17: /* extractps */
3735 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,
3736 xmm_regs[reg].XMM_L(val & 3)));
3737 if (mod == 3)
3738 gen_op_mov_reg_T0(ot, rm);
3739 else
3740 tcg_gen_qemu_st32(cpu_T[0], cpu_A0,
3741 (s->mem_index >> 2) - 1);
3742 break;
3743 case 0x20: /* pinsrb */
3744 if (mod == 3)
3745 gen_op_mov_TN_reg(OT_LONG, 0, rm);
3746 else
3747 tcg_gen_qemu_ld8u(cpu_tmp0, cpu_A0,
3748 (s->mem_index >> 2) - 1);
3749 tcg_gen_st8_tl(cpu_tmp0, cpu_env, offsetof(CPUX86State,
3750 xmm_regs[reg].XMM_B(val & 15)));
3751 break;
3752 case 0x21: /* insertps */
3753 if (mod == 3) {
3754 tcg_gen_ld_i32(cpu_tmp2_i32, cpu_env,
3755 offsetof(CPUX86State,xmm_regs[rm]
3756 .XMM_L((val >> 6) & 3)));
3757 } else {
3758 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3759 (s->mem_index >> 2) - 1);
3760 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3761 }
3762 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3763 offsetof(CPUX86State,xmm_regs[reg]
3764 .XMM_L((val >> 4) & 3)));
3765 if ((val >> 0) & 1)
3766 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3767 cpu_env, offsetof(CPUX86State,
3768 xmm_regs[reg].XMM_L(0)));
3769 if ((val >> 1) & 1)
3770 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3771 cpu_env, offsetof(CPUX86State,
3772 xmm_regs[reg].XMM_L(1)));
3773 if ((val >> 2) & 1)
3774 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3775 cpu_env, offsetof(CPUX86State,
3776 xmm_regs[reg].XMM_L(2)));
3777 if ((val >> 3) & 1)
3778 tcg_gen_st_i32(tcg_const_i32(0 /*float32_zero*/),
3779 cpu_env, offsetof(CPUX86State,
3780 xmm_regs[reg].XMM_L(3)));
3781 break;
3782 case 0x22:
3783 if (ot == OT_LONG) { /* pinsrd */
3784 if (mod == 3)
3785 gen_op_mov_v_reg(ot, cpu_tmp0, rm);
3786 else
3787 tcg_gen_qemu_ld32u(cpu_tmp0, cpu_A0,
3788 (s->mem_index >> 2) - 1);
3789 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_tmp0);
3790 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env,
3791 offsetof(CPUX86State,
3792 xmm_regs[reg].XMM_L(val & 3)));
3793 } else { /* pinsrq */
3794 #ifdef TARGET_X86_64
3795 if (mod == 3)
3796 gen_op_mov_v_reg(ot, cpu_tmp1_i64, rm);
3797 else
3798 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
3799 (s->mem_index >> 2) - 1);
3800 tcg_gen_st_i64(cpu_tmp1_i64, cpu_env,
3801 offsetof(CPUX86State,
3802 xmm_regs[reg].XMM_Q(val & 1)));
3803 #else
3804 goto illegal_op;
3805 #endif
3806 }
3807 break;
3808 }
3809 return;
3810 }
3812 if (b1) {
3813 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3814 if (mod == 3) {
3815 op2_offset = offsetof(CPUX86State,xmm_regs[rm | REX_B(s)]);
3816 } else {
3817 op2_offset = offsetof(CPUX86State,xmm_t0);
3818 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3819 gen_ldo_env_A0(s->mem_index, op2_offset);
3820 }
3821 } else {
3822 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3823 if (mod == 3) {
3824 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3825 } else {
3826 op2_offset = offsetof(CPUX86State,mmx_t0);
3827 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3828 gen_ldq_env_A0(s->mem_index, op2_offset);
3829 }
3830 }
3831 val = ldub_code(s->pc++);
3833 if ((b & 0xfc) == 0x60) { /* pcmpXstrX */
3834 s->cc_op = CC_OP_EFLAGS;
3836 if (s->dflag == 2)
3837 /* The helper must use entire 64-bit gp registers */
3838 val |= 1 << 8;
3839 }
3841 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3842 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3843 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3844 break;
3845 default:
3846 goto illegal_op;
3847 }
3848 } else {
3849 /* generic MMX or SSE operation */
3850 switch(b) {
3851 case 0x70: /* pshufx insn */
3852 case 0xc6: /* pshufx insn */
3853 case 0xc2: /* compare insns */
3854 s->rip_offset = 1;
3855 break;
3856 default:
3857 break;
3858 }
3859 if (is_xmm) {
3860 op1_offset = offsetof(CPUX86State,xmm_regs[reg]);
3861 if (mod != 3) {
3862 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3863 op2_offset = offsetof(CPUX86State,xmm_t0);
3864 if (b1 >= 2 && ((b >= 0x50 && b <= 0x5f && b != 0x5b) ||
3865 b == 0xc2)) {
3866 /* special case for SSE scalar instructions: only 32 or 64 bits are accessed */
3867 if (b1 == 2) {
3868 /* 32 bit access */
3869 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
3870 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,xmm_t0.XMM_L(0)));
3871 } else {
3872 /* 64 bit access */
3873 gen_ldq_env_A0(s->mem_index, offsetof(CPUX86State,xmm_t0.XMM_D(0)));
3874 }
3875 } else {
3876 gen_ldo_env_A0(s->mem_index, op2_offset);
3877 }
3878 } else {
3879 rm = (modrm & 7) | REX_B(s);
3880 op2_offset = offsetof(CPUX86State,xmm_regs[rm]);
3881 }
3882 } else {
3883 op1_offset = offsetof(CPUX86State,fpregs[reg].mmx);
3884 if (mod != 3) {
3885 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
3886 op2_offset = offsetof(CPUX86State,mmx_t0);
3887 gen_ldq_env_A0(s->mem_index, op2_offset);
3888 } else {
3889 rm = (modrm & 7);
3890 op2_offset = offsetof(CPUX86State,fpregs[rm].mmx);
3891 }
3892 }
3893 switch(b) {
3894 case 0x0f: /* 3DNow! data insns */
3895 if (!(s->cpuid_ext2_features & CPUID_EXT2_3DNOW))
3896 goto illegal_op;
3897 val = ldub_code(s->pc++);
3898 sse_op2 = sse_op_table5[val];
3899 if (!sse_op2)
3900 goto illegal_op;
3901 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3902 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3903 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3904 break;
3905 case 0x70: /* pshufx insn */
3906 case 0xc6: /* pshufx insn */
3907 val = ldub_code(s->pc++);
3908 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3909 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3910 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv_i32))sse_op2)(cpu_ptr0, cpu_ptr1, tcg_const_i32(val));
3911 break;
3912 case 0xc2:
3913 /* compare insns */
3914 val = ldub_code(s->pc++);
3915 if (val >= 8)
3916 goto illegal_op;
3917 sse_op2 = sse_op_table4[val][b1];
3918 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3919 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3920 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3921 break;
3922 case 0xf7:
3923 /* maskmov : we must prepare A0 */
3924 if (mod != 3)
3925 goto illegal_op;
3926 #ifdef TARGET_X86_64
3927 if (s->aflag == 2) {
3928 gen_op_movq_A0_reg(R_EDI);
3929 } else
3930 #endif
3931 {
3932 gen_op_movl_A0_reg(R_EDI);
3933 if (s->aflag == 0)
3934 gen_op_andl_A0_ffff();
3935 }
3936 gen_add_A0_ds_seg(s);
3938 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3939 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3940 ((void (*)(TCGv_ptr, TCGv_ptr, TCGv))sse_op2)(cpu_ptr0, cpu_ptr1, cpu_A0);
3941 break;
3942 default:
3943 tcg_gen_addi_ptr(cpu_ptr0, cpu_env, op1_offset);
3944 tcg_gen_addi_ptr(cpu_ptr1, cpu_env, op2_offset);
3945 ((void (*)(TCGv_ptr, TCGv_ptr))sse_op2)(cpu_ptr0, cpu_ptr1);
3946 break;
3947 }
3948 if (b == 0x2e || b == 0x2f) {
3949 s->cc_op = CC_OP_EFLAGS;
3950 }
3951 }
3952 }
3954 /* convert one instruction. s->is_jmp is set if the translation must
3955 be stopped. Return the next pc value */
3956 static target_ulong disas_insn(DisasContext *s, target_ulong pc_start)
3957 {
3958 int b, prefixes, aflag, dflag;
3959 int shift, ot;
3960 int modrm, reg, rm, mod, reg_addr, op, opreg, offset_addr, val;
3961 target_ulong next_eip, tval;
3962 int rex_w, rex_r;
3964 if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP)))
3965 tcg_gen_debug_insn_start(pc_start);
3966 s->pc = pc_start;
3967 prefixes = 0;
3968 aflag = s->code32;
3969 dflag = s->code32;
3970 s->override = -1;
3971 rex_w = -1;
3972 rex_r = 0;
3973 #ifdef TARGET_X86_64
3974 s->rex_x = 0;
3975 s->rex_b = 0;
3976 x86_64_hregs = 0;
3977 #endif
3978 s->rip_offset = 0; /* for relative ip address */
3979 next_byte:
3980 b = ldub_code(s->pc);
3981 s->pc++;
3982 /* check prefixes */
3983 #ifdef TARGET_X86_64
3984 if (CODE64(s)) {
3985 switch (b) {
3986 case 0xf3:
3987 prefixes |= PREFIX_REPZ;
3988 goto next_byte;
3989 case 0xf2:
3990 prefixes |= PREFIX_REPNZ;
3991 goto next_byte;
3992 case 0xf0:
3993 prefixes |= PREFIX_LOCK;
3994 goto next_byte;
3995 case 0x2e:
3996 s->override = R_CS;
3997 goto next_byte;
3998 case 0x36:
3999 s->override = R_SS;
4000 goto next_byte;
4001 case 0x3e:
4002 s->override = R_DS;
4003 goto next_byte;
4004 case 0x26:
4005 s->override = R_ES;
4006 goto next_byte;
4007 case 0x64:
4008 s->override = R_FS;
4009 goto next_byte;
4010 case 0x65:
4011 s->override = R_GS;
4012 goto next_byte;
4013 case 0x66:
4014 prefixes |= PREFIX_DATA;
4015 goto next_byte;
4016 case 0x67:
4017 prefixes |= PREFIX_ADR;
4018 goto next_byte;
4019 case 0x40 ... 0x4f:
4020 /* REX prefix */
4021 rex_w = (b >> 3) & 1;
4022 rex_r = (b & 0x4) << 1;
4023 s->rex_x = (b & 0x2) << 2;
4024 REX_B(s) = (b & 0x1) << 3;
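/* the REX bits are stored pre-shifted to the value 8 so they can simply
   be OR'ed into the 3-bit modrm register fields */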
4025 x86_64_hregs = 1; /* select uniform byte register addressing */
4026 goto next_byte;
4027 }
4028 if (rex_w == 1) {
4029 /* 0x66 is ignored if rex.w is set */
4030 dflag = 2;
4031 } else {
4032 if (prefixes & PREFIX_DATA)
4033 dflag ^= 1;
4034 }
4035 if (!(prefixes & PREFIX_ADR))
4036 aflag = 2;
4037 } else
4038 #endif
4039 {
4040 switch (b) {
4041 case 0xf3:
4042 prefixes |= PREFIX_REPZ;
4043 goto next_byte;
4044 case 0xf2:
4045 prefixes |= PREFIX_REPNZ;
4046 goto next_byte;
4047 case 0xf0:
4048 prefixes |= PREFIX_LOCK;
4049 goto next_byte;
4050 case 0x2e:
4051 s->override = R_CS;
4052 goto next_byte;
4053 case 0x36:
4054 s->override = R_SS;
4055 goto next_byte;
4056 case 0x3e:
4057 s->override = R_DS;
4058 goto next_byte;
4059 case 0x26:
4060 s->override = R_ES;
4061 goto next_byte;
4062 case 0x64:
4063 s->override = R_FS;
4064 goto next_byte;
4065 case 0x65:
4066 s->override = R_GS;
4067 goto next_byte;
4068 case 0x66:
4069 prefixes |= PREFIX_DATA;
4070 goto next_byte;
4071 case 0x67:
4072 prefixes |= PREFIX_ADR;
4073 goto next_byte;
4074 }
4075 if (prefixes & PREFIX_DATA)
4076 dflag ^= 1;
4077 if (prefixes & PREFIX_ADR)
4078 aflag ^= 1;
4079 }
4081 s->prefix = prefixes;
4082 s->aflag = aflag;
4083 s->dflag = dflag;
4085 /* lock generation */
4086 if (prefixes & PREFIX_LOCK)
4087 gen_helper_lock();
4089 /* now check op code */
4090 reswitch:
4091 switch(b) {
4092 case 0x0f:
4093 /**************************/
4094 /* extended op code */
4095 b = ldub_code(s->pc++) | 0x100;
4096 goto reswitch;
4098 /**************************/
4099 /* arith & logic */
4100 case 0x00 ... 0x05:
4101 case 0x08 ... 0x0d:
4102 case 0x10 ... 0x15:
4103 case 0x18 ... 0x1d:
4104 case 0x20 ... 0x25:
4105 case 0x28 ... 0x2d:
4106 case 0x30 ... 0x35:
4107 case 0x38 ... 0x3d:
4108 {
4109 int op, f, val;
4110 op = (b >> 3) & 7;
4111 f = (b >> 1) & 3;
4113 if ((b & 1) == 0)
4114 ot = OT_BYTE;
4115 else
4116 ot = dflag + OT_WORD;
4118 switch(f) {
4119 case 0: /* OP Ev, Gv */
4120 modrm = ldub_code(s->pc++);
4121 reg = ((modrm >> 3) & 7) | rex_r;
4122 mod = (modrm >> 6) & 3;
4123 rm = (modrm & 7) | REX_B(s);
4124 if (mod != 3) {
4125 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4126 opreg = OR_TMP0;
4127 } else if (op == OP_XORL && rm == reg) {
4128 xor_zero:
4129 /* xor reg, reg optimisation */
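/* (the old register value is never read: the result and the flags of
   xor reg,reg do not depend on it) */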
4130 gen_op_movl_T0_0();
4131 s->cc_op = CC_OP_LOGICB + ot;
4132 gen_op_mov_reg_T0(ot, reg);
4133 gen_op_update1_cc();
4134 break;
4135 } else {
4136 opreg = rm;
4137 }
4138 gen_op_mov_TN_reg(ot, 1, reg);
4139 gen_op(s, op, ot, opreg);
4140 break;
4141 case 1: /* OP Gv, Ev */
4142 modrm = ldub_code(s->pc++);
4143 mod = (modrm >> 6) & 3;
4144 reg = ((modrm >> 3) & 7) | rex_r;
4145 rm = (modrm & 7) | REX_B(s);
4146 if (mod != 3) {
4147 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4148 gen_op_ld_T1_A0(ot + s->mem_index);
4149 } else if (op == OP_XORL && rm == reg) {
4150 goto xor_zero;
4151 } else {
4152 gen_op_mov_TN_reg(ot, 1, rm);
4153 }
4154 gen_op(s, op, ot, reg);
4155 break;
4156 case 2: /* OP A, Iv */
4157 val = insn_get(s, ot);
4158 gen_op_movl_T1_im(val);
4159 gen_op(s, op, ot, OR_EAX);
4160 break;
4161 }
4162 }
4163 break;
4165 case 0x82:
4166 if (CODE64(s))
4167 goto illegal_op;
4168 case 0x80: /* GRP1 */
4169 case 0x81:
4170 case 0x83:
4171 {
4172 int val;
4174 if ((b & 1) == 0)
4175 ot = OT_BYTE;
4176 else
4177 ot = dflag + OT_WORD;
4179 modrm = ldub_code(s->pc++);
4180 mod = (modrm >> 6) & 3;
4181 rm = (modrm & 7) | REX_B(s);
4182 op = (modrm >> 3) & 7;
4184 if (mod != 3) {
4185 if (b == 0x83)
4186 s->rip_offset = 1;
4187 else
4188 s->rip_offset = insn_const_size(ot);
4189 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4190 opreg = OR_TMP0;
4191 } else {
4192 opreg = rm;
4195 switch(b) {
4196 default:
4197 case 0x80:
4198 case 0x81:
4199 case 0x82:
4200 val = insn_get(s, ot);
4201 break;
4202 case 0x83:
4203 val = (int8_t)insn_get(s, OT_BYTE);
4204 break;
4206 gen_op_movl_T1_im(val);
4207 gen_op(s, op, ot, opreg);
4209 break;
4211 /**************************/
4212 /* inc, dec, and other misc arith */
4213 case 0x40 ... 0x47: /* inc Gv */
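/* 0x40..0x4f are only reached here in 16/32-bit code; in long mode
   they were already consumed as REX prefixes during prefix decoding */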
4214 ot = dflag ? OT_LONG : OT_WORD;
4215 gen_inc(s, ot, OR_EAX + (b & 7), 1);
4216 break;
4217 case 0x48 ... 0x4f: /* dec Gv */
4218 ot = dflag ? OT_LONG : OT_WORD;
4219 gen_inc(s, ot, OR_EAX + (b & 7), -1);
4220 break;
4221 case 0xf6: /* GRP3 */
4222 case 0xf7:
4223 if ((b & 1) == 0)
4224 ot = OT_BYTE;
4225 else
4226 ot = dflag + OT_WORD;
4228 modrm = ldub_code(s->pc++);
4229 mod = (modrm >> 6) & 3;
4230 rm = (modrm & 7) | REX_B(s);
4231 op = (modrm >> 3) & 7;
4232 if (mod != 3) {
4233 if (op == 0)
4234 s->rip_offset = insn_const_size(ot);
4235 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4236 gen_op_ld_T0_A0(ot + s->mem_index);
4237 } else {
4238 gen_op_mov_TN_reg(ot, 0, rm);
4241 switch(op) {
4242 case 0: /* test */
4243 val = insn_get(s, ot);
4244 gen_op_movl_T1_im(val);
4245 gen_op_testl_T0_T1_cc();
4246 s->cc_op = CC_OP_LOGICB + ot;
4247 break;
4248 case 2: /* not */
4249 tcg_gen_not_tl(cpu_T[0], cpu_T[0]);
4250 if (mod != 3) {
4251 gen_op_st_T0_A0(ot + s->mem_index);
4252 } else {
4253 gen_op_mov_reg_T0(ot, rm);
4255 break;
4256 case 3: /* neg */
4257 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
4258 if (mod != 3) {
4259 gen_op_st_T0_A0(ot + s->mem_index);
4260 } else {
4261 gen_op_mov_reg_T0(ot, rm);
4263 gen_op_update_neg_cc();
4264 s->cc_op = CC_OP_SUBB + ot;
4265 break;
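/* for the CC_OP_MUL* modes, CF and OF are later derived as
   (cc_src != 0): mul stores the high half of the product in cc_src,
   while imul stores the difference between the full result and the
   sign extension of its low half, which is non-zero exactly when the
   result overflows the destination */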
4266 case 4: /* mul */
4267 switch(ot) {
4268 case OT_BYTE:
4269 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4270 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
4271 tcg_gen_ext8u_tl(cpu_T[1], cpu_T[1]);
4272 /* XXX: use 32 bit mul which could be faster */
4273 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4274 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4275 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4276 tcg_gen_andi_tl(cpu_cc_src, cpu_T[0], 0xff00);
4277 s->cc_op = CC_OP_MULB;
4278 break;
4279 case OT_WORD:
4280 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4281 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
4282 tcg_gen_ext16u_tl(cpu_T[1], cpu_T[1]);
4283 /* XXX: use 32 bit mul which could be faster */
4284 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4285 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4286 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4287 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4288 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4289 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4290 s->cc_op = CC_OP_MULW;
4291 break;
4292 default:
4293 case OT_LONG:
4294 #ifdef TARGET_X86_64
4295 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4296 tcg_gen_ext32u_tl(cpu_T[0], cpu_T[0]);
4297 tcg_gen_ext32u_tl(cpu_T[1], cpu_T[1]);
4298 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4299 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4300 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4301 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4302 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4303 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4304 #else
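/* on 32-bit targets target_ulong is only 32 bits, so the operands
   are widened into 64-bit temporaries to form the full product,
   which is then split between EAX (low half) and EDX (high half) */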
4306 TCGv_i64 t0, t1;
4307 t0 = tcg_temp_new_i64();
4308 t1 = tcg_temp_new_i64();
4309 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4310 tcg_gen_extu_i32_i64(t0, cpu_T[0]);
4311 tcg_gen_extu_i32_i64(t1, cpu_T[1]);
4312 tcg_gen_mul_i64(t0, t0, t1);
4313 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4314 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4315 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4316 tcg_gen_shri_i64(t0, t0, 32);
4317 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4318 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4319 tcg_gen_mov_tl(cpu_cc_src, cpu_T[0]);
4321 #endif
4322 s->cc_op = CC_OP_MULL;
4323 break;
4324 #ifdef TARGET_X86_64
4325 case OT_QUAD:
4326 gen_helper_mulq_EAX_T0(cpu_T[0]);
4327 s->cc_op = CC_OP_MULQ;
4328 break;
4329 #endif
4331 break;
4332 case 5: /* imul */
4333 switch(ot) {
4334 case OT_BYTE:
4335 gen_op_mov_TN_reg(OT_BYTE, 1, R_EAX);
4336 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4337 tcg_gen_ext8s_tl(cpu_T[1], cpu_T[1]);
4338 /* XXX: use 32 bit mul which could be faster */
4339 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4340 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4341 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4342 tcg_gen_ext8s_tl(cpu_tmp0, cpu_T[0]);
4343 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4344 s->cc_op = CC_OP_MULB;
4345 break;
4346 case OT_WORD:
4347 gen_op_mov_TN_reg(OT_WORD, 1, R_EAX);
4348 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4349 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4350 /* XXX: use 32 bit mul which could be faster */
4351 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4352 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4353 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4354 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4355 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4356 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 16);
4357 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4358 s->cc_op = CC_OP_MULW;
4359 break;
4360 default:
4361 case OT_LONG:
4362 #ifdef TARGET_X86_64
4363 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4364 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4365 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4366 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4367 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4368 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4369 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4370 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4371 tcg_gen_shri_tl(cpu_T[0], cpu_T[0], 32);
4372 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4373 #else
4375 TCGv_i64 t0, t1;
4376 t0 = tcg_temp_new_i64();
4377 t1 = tcg_temp_new_i64();
4378 gen_op_mov_TN_reg(OT_LONG, 1, R_EAX);
4379 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4380 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4381 tcg_gen_mul_i64(t0, t0, t1);
4382 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4383 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4384 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4385 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4386 tcg_gen_shri_i64(t0, t0, 32);
4387 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4388 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4389 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4391 #endif
4392 s->cc_op = CC_OP_MULL;
4393 break;
4394 #ifdef TARGET_X86_64
4395 case OT_QUAD:
4396 gen_helper_imulq_EAX_T0(cpu_T[0]);
4397 s->cc_op = CC_OP_MULQ;
4398 break;
4399 #endif
4401 break;
4402 case 6: /* div */
4403 switch(ot) {
4404 case OT_BYTE:
4405 gen_jmp_im(pc_start - s->cs_base);
4406 gen_helper_divb_AL(cpu_T[0]);
4407 break;
4408 case OT_WORD:
4409 gen_jmp_im(pc_start - s->cs_base);
4410 gen_helper_divw_AX(cpu_T[0]);
4411 break;
4412 default:
4413 case OT_LONG:
4414 gen_jmp_im(pc_start - s->cs_base);
4415 gen_helper_divl_EAX(cpu_T[0]);
4416 break;
4417 #ifdef TARGET_X86_64
4418 case OT_QUAD:
4419 gen_jmp_im(pc_start - s->cs_base);
4420 gen_helper_divq_EAX(cpu_T[0]);
4421 break;
4422 #endif
4424 break;
4425 case 7: /* idiv */
4426 switch(ot) {
4427 case OT_BYTE:
4428 gen_jmp_im(pc_start - s->cs_base);
4429 gen_helper_idivb_AL(cpu_T[0]);
4430 break;
4431 case OT_WORD:
4432 gen_jmp_im(pc_start - s->cs_base);
4433 gen_helper_idivw_AX(cpu_T[0]);
4434 break;
4435 default:
4436 case OT_LONG:
4437 gen_jmp_im(pc_start - s->cs_base);
4438 gen_helper_idivl_EAX(cpu_T[0]);
4439 break;
4440 #ifdef TARGET_X86_64
4441 case OT_QUAD:
4442 gen_jmp_im(pc_start - s->cs_base);
4443 gen_helper_idivq_EAX(cpu_T[0]);
4444 break;
4445 #endif
4447 break;
4448 default:
4449 goto illegal_op;
4451 break;
4453 case 0xfe: /* GRP4 */
4454 case 0xff: /* GRP5 */
4455 if ((b & 1) == 0)
4456 ot = OT_BYTE;
4457 else
4458 ot = dflag + OT_WORD;
4460 modrm = ldub_code(s->pc++);
4461 mod = (modrm >> 6) & 3;
4462 rm = (modrm & 7) | REX_B(s);
4463 op = (modrm >> 3) & 7;
4464 if (op >= 2 && b == 0xfe) {
4465 goto illegal_op;
4467 if (CODE64(s)) {
4468 if (op == 2 || op == 4) {
4469 /* operand size for jumps is 64 bit */
4470 ot = OT_QUAD;
4471 } else if (op == 3 || op == 5) {
4472 /* for lcall/ljmp, the operand is 16 or 32 bit, even
4473 in long mode */
4474 ot = dflag ? OT_LONG : OT_WORD;
4475 } else if (op == 6) {
4476 /* default push size is 64 bit */
4477 ot = dflag ? OT_QUAD : OT_WORD;
4480 if (mod != 3) {
4481 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4482 if (op >= 2 && op != 3 && op != 5)
4483 gen_op_ld_T0_A0(ot + s->mem_index);
4484 } else {
4485 gen_op_mov_TN_reg(ot, 0, rm);
4488 switch(op) {
4489 case 0: /* inc Ev */
4490 if (mod != 3)
4491 opreg = OR_TMP0;
4492 else
4493 opreg = rm;
4494 gen_inc(s, ot, opreg, 1);
4495 break;
4496 case 1: /* dec Ev */
4497 if (mod != 3)
4498 opreg = OR_TMP0;
4499 else
4500 opreg = rm;
4501 gen_inc(s, ot, opreg, -1);
4502 break;
4503 case 2: /* call Ev */
4504 /* XXX: optimize if memory (no 'and' is necessary) */
4505 if (s->dflag == 0)
4506 gen_op_andl_T0_ffff();
4507 next_eip = s->pc - s->cs_base;
4508 gen_movtl_T1_im(next_eip);
4509 gen_push_T1(s);
4510 gen_op_jmp_T0();
4511 gen_eob(s);
4512 break;
4513 case 3: /* lcall Ev */
4514 gen_op_ld_T1_A0(ot + s->mem_index);
4515 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4516 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4517 do_lcall:
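/* far calls always go through helpers: the protected-mode helper
   performs descriptor and privilege checks (EIP is synced first so
   it can fault precisely), while the real/vm86 helper simply pushes
   CS:IP and reloads CS as a raw selector */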
4518 if (s->pe && !s->vm86) {
4519 if (s->cc_op != CC_OP_DYNAMIC)
4520 gen_op_set_cc_op(s->cc_op);
4521 gen_jmp_im(pc_start - s->cs_base);
4522 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4523 gen_helper_lcall_protected(cpu_tmp2_i32, cpu_T[1],
4524 tcg_const_i32(dflag),
4525 tcg_const_i32(s->pc - pc_start));
4526 } else {
4527 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4528 gen_helper_lcall_real(cpu_tmp2_i32, cpu_T[1],
4529 tcg_const_i32(dflag),
4530 tcg_const_i32(s->pc - s->cs_base));
4532 gen_eob(s);
4533 break;
4534 case 4: /* jmp Ev */
4535 if (s->dflag == 0)
4536 gen_op_andl_T0_ffff();
4537 gen_op_jmp_T0();
4538 gen_eob(s);
4539 break;
4540 case 5: /* ljmp Ev */
4541 gen_op_ld_T1_A0(ot + s->mem_index);
4542 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
4543 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
4544 do_ljmp:
4545 if (s->pe && !s->vm86) {
4546 if (s->cc_op != CC_OP_DYNAMIC)
4547 gen_op_set_cc_op(s->cc_op);
4548 gen_jmp_im(pc_start - s->cs_base);
4549 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
4550 gen_helper_ljmp_protected(cpu_tmp2_i32, cpu_T[1],
4551 tcg_const_i32(s->pc - pc_start));
4552 } else {
4553 gen_op_movl_seg_T0_vm(R_CS);
4554 gen_op_movl_T0_T1();
4555 gen_op_jmp_T0();
4557 gen_eob(s);
4558 break;
4559 case 6: /* push Ev */
4560 gen_push_T0(s);
4561 break;
4562 default:
4563 goto illegal_op;
4565 break;
4567 case 0x84: /* test Ev, Gv */
4568 case 0x85:
4569 if ((b & 1) == 0)
4570 ot = OT_BYTE;
4571 else
4572 ot = dflag + OT_WORD;
4574 modrm = ldub_code(s->pc++);
4575 mod = (modrm >> 6) & 3;
4576 rm = (modrm & 7) | REX_B(s);
4577 reg = ((modrm >> 3) & 7) | rex_r;
4579 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4580 gen_op_mov_TN_reg(ot, 1, reg);
4581 gen_op_testl_T0_T1_cc();
4582 s->cc_op = CC_OP_LOGICB + ot;
4583 break;
4585 case 0xa8: /* test eAX, Iv */
4586 case 0xa9:
4587 if ((b & 1) == 0)
4588 ot = OT_BYTE;
4589 else
4590 ot = dflag + OT_WORD;
4591 val = insn_get(s, ot);
4593 gen_op_mov_TN_reg(ot, 0, OR_EAX);
4594 gen_op_movl_T1_im(val);
4595 gen_op_testl_T0_T1_cc();
4596 s->cc_op = CC_OP_LOGICB + ot;
4597 break;
4599 case 0x98: /* CWDE/CBW */
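/* dflag selects the widening: 2 = CDQE (RAX <- sign(EAX)),
   1 = CWDE (EAX <- sign(AX)), 0 = CBW (AX <- sign(AL)) */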
4600 #ifdef TARGET_X86_64
4601 if (dflag == 2) {
4602 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4603 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4604 gen_op_mov_reg_T0(OT_QUAD, R_EAX);
4605 } else
4606 #endif
4607 if (dflag == 1) {
4608 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4609 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4610 gen_op_mov_reg_T0(OT_LONG, R_EAX);
4611 } else {
4612 gen_op_mov_TN_reg(OT_BYTE, 0, R_EAX);
4613 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
4614 gen_op_mov_reg_T0(OT_WORD, R_EAX);
4616 break;
4617 case 0x99: /* CDQ/CWD */
4618 #ifdef TARGET_X86_64
4619 if (dflag == 2) {
4620 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
4621 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 63);
4622 gen_op_mov_reg_T0(OT_QUAD, R_EDX);
4623 } else
4624 #endif
4625 if (dflag == 1) {
4626 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
4627 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4628 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 31);
4629 gen_op_mov_reg_T0(OT_LONG, R_EDX);
4630 } else {
4631 gen_op_mov_TN_reg(OT_WORD, 0, R_EAX);
4632 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4633 tcg_gen_sari_tl(cpu_T[0], cpu_T[0], 15);
4634 gen_op_mov_reg_T0(OT_WORD, R_EDX);
4636 break;
4637 case 0x1af: /* imul Gv, Ev */
4638 case 0x69: /* imul Gv, Ev, I */
4639 case 0x6b:
4640 ot = dflag + OT_WORD;
4641 modrm = ldub_code(s->pc++);
4642 reg = ((modrm >> 3) & 7) | rex_r;
4643 if (b == 0x69)
4644 s->rip_offset = insn_const_size(ot);
4645 else if (b == 0x6b)
4646 s->rip_offset = 1;
4647 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4648 if (b == 0x69) {
4649 val = insn_get(s, ot);
4650 gen_op_movl_T1_im(val);
4651 } else if (b == 0x6b) {
4652 val = (int8_t)insn_get(s, OT_BYTE);
4653 gen_op_movl_T1_im(val);
4654 } else {
4655 gen_op_mov_TN_reg(ot, 1, reg);
4658 #ifdef TARGET_X86_64
4659 if (ot == OT_QUAD) {
4660 gen_helper_imulq_T0_T1(cpu_T[0], cpu_T[0], cpu_T[1]);
4661 } else
4662 #endif
4663 if (ot == OT_LONG) {
4664 #ifdef TARGET_X86_64
4665 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
4666 tcg_gen_ext32s_tl(cpu_T[1], cpu_T[1]);
4667 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4668 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4669 tcg_gen_ext32s_tl(cpu_tmp0, cpu_T[0]);
4670 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4671 #else
4673 TCGv_i64 t0, t1;
4674 t0 = tcg_temp_new_i64();
4675 t1 = tcg_temp_new_i64();
4676 tcg_gen_ext_i32_i64(t0, cpu_T[0]);
4677 tcg_gen_ext_i32_i64(t1, cpu_T[1]);
4678 tcg_gen_mul_i64(t0, t0, t1);
4679 tcg_gen_trunc_i64_i32(cpu_T[0], t0);
4680 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4681 tcg_gen_sari_tl(cpu_tmp0, cpu_T[0], 31);
4682 tcg_gen_shri_i64(t0, t0, 32);
4683 tcg_gen_trunc_i64_i32(cpu_T[1], t0);
4684 tcg_gen_sub_tl(cpu_cc_src, cpu_T[1], cpu_tmp0);
4686 #endif
4687 } else {
4688 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
4689 tcg_gen_ext16s_tl(cpu_T[1], cpu_T[1]);
4690 /* XXX: use 32 bit mul which could be faster */
4691 tcg_gen_mul_tl(cpu_T[0], cpu_T[0], cpu_T[1]);
4692 tcg_gen_mov_tl(cpu_cc_dst, cpu_T[0]);
4693 tcg_gen_ext16s_tl(cpu_tmp0, cpu_T[0]);
4694 tcg_gen_sub_tl(cpu_cc_src, cpu_T[0], cpu_tmp0);
4696 gen_op_mov_reg_T0(ot, reg);
4697 s->cc_op = CC_OP_MULB + ot;
4698 break;
4699 case 0x1c0:
4700 case 0x1c1: /* xadd Ev, Gv */
4701 if ((b & 1) == 0)
4702 ot = OT_BYTE;
4703 else
4704 ot = dflag + OT_WORD;
4705 modrm = ldub_code(s->pc++);
4706 reg = ((modrm >> 3) & 7) | rex_r;
4707 mod = (modrm >> 6) & 3;
4708 if (mod == 3) {
4709 rm = (modrm & 7) | REX_B(s);
4710 gen_op_mov_TN_reg(ot, 0, reg);
4711 gen_op_mov_TN_reg(ot, 1, rm);
4712 gen_op_addl_T0_T1();
4713 gen_op_mov_reg_T1(ot, reg);
4714 gen_op_mov_reg_T0(ot, rm);
4715 } else {
4716 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4717 gen_op_mov_TN_reg(ot, 0, reg);
4718 gen_op_ld_T1_A0(ot + s->mem_index);
4719 gen_op_addl_T0_T1();
4720 gen_op_st_T0_A0(ot + s->mem_index);
4721 gen_op_mov_reg_T1(ot, reg);
4723 gen_op_update2_cc();
4724 s->cc_op = CC_OP_ADDB + ot;
4725 break;
4726 case 0x1b0:
4727 case 0x1b1: /* cmpxchg Ev, Gv */
4729 int label1, label2;
4730 TCGv t0, t1, t2, a0;
4732 if ((b & 1) == 0)
4733 ot = OT_BYTE;
4734 else
4735 ot = dflag + OT_WORD;
4736 modrm = ldub_code(s->pc++);
4737 reg = ((modrm >> 3) & 7) | rex_r;
4738 mod = (modrm >> 6) & 3;
4739 t0 = tcg_temp_local_new();
4740 t1 = tcg_temp_local_new();
4741 t2 = tcg_temp_local_new();
4742 a0 = tcg_temp_local_new();
4743 gen_op_mov_v_reg(ot, t1, reg);
4744 if (mod == 3) {
4745 rm = (modrm & 7) | REX_B(s);
4746 gen_op_mov_v_reg(ot, t0, rm);
4747 } else {
4748 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4749 tcg_gen_mov_tl(a0, cpu_A0);
4750 gen_op_ld_v(ot + s->mem_index, t0, a0);
4751 rm = 0; /* avoid warning */
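/* compare EAX with the destination (t0); if equal, the source value
   in t1 is stored, otherwise the old value is loaded into EAX.  The
   memory form stores on both paths, matching the architectural
   behaviour of always performing a write cycle. */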
4753 label1 = gen_new_label();
4754 tcg_gen_ld_tl(t2, cpu_env, offsetof(CPUState, regs[R_EAX]));
4755 tcg_gen_sub_tl(t2, t2, t0);
4756 gen_extu(ot, t2);
4757 tcg_gen_brcondi_tl(TCG_COND_EQ, t2, 0, label1);
4758 if (mod == 3) {
4759 label2 = gen_new_label();
4760 gen_op_mov_reg_v(ot, R_EAX, t0);
4761 tcg_gen_br(label2);
4762 gen_set_label(label1);
4763 gen_op_mov_reg_v(ot, rm, t1);
4764 gen_set_label(label2);
4765 } else {
4766 tcg_gen_mov_tl(t1, t0);
4767 gen_op_mov_reg_v(ot, R_EAX, t0);
4768 gen_set_label(label1);
4769 /* always store */
4770 gen_op_st_v(ot + s->mem_index, t1, a0);
4772 tcg_gen_mov_tl(cpu_cc_src, t0);
4773 tcg_gen_mov_tl(cpu_cc_dst, t2);
4774 s->cc_op = CC_OP_SUBB + ot;
4775 tcg_temp_free(t0);
4776 tcg_temp_free(t1);
4777 tcg_temp_free(t2);
4778 tcg_temp_free(a0);
4780 break;
4781 case 0x1c7: /* cmpxchg8b / cmpxchg16b */
4782 modrm = ldub_code(s->pc++);
4783 mod = (modrm >> 6) & 3;
4784 if ((mod == 3) || ((modrm & 0x38) != 0x8))
4785 goto illegal_op;
4786 #ifdef TARGET_X86_64
4787 if (dflag == 2) {
4788 if (!(s->cpuid_ext_features & CPUID_EXT_CX16))
4789 goto illegal_op;
4790 gen_jmp_im(pc_start - s->cs_base);
4791 if (s->cc_op != CC_OP_DYNAMIC)
4792 gen_op_set_cc_op(s->cc_op);
4793 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4794 gen_helper_cmpxchg16b(cpu_A0);
4795 } else
4796 #endif
4798 if (!(s->cpuid_features & CPUID_CX8))
4799 goto illegal_op;
4800 gen_jmp_im(pc_start - s->cs_base);
4801 if (s->cc_op != CC_OP_DYNAMIC)
4802 gen_op_set_cc_op(s->cc_op);
4803 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4804 gen_helper_cmpxchg8b(cpu_A0);
4806 s->cc_op = CC_OP_EFLAGS;
4807 break;
4809 /**************************/
4810 /* push/pop */
4811 case 0x50 ... 0x57: /* push */
4812 gen_op_mov_TN_reg(OT_LONG, 0, (b & 7) | REX_B(s));
4813 gen_push_T0(s);
4814 break;
4815 case 0x58 ... 0x5f: /* pop */
4816 if (CODE64(s)) {
4817 ot = dflag ? OT_QUAD : OT_WORD;
4818 } else {
4819 ot = dflag + OT_WORD;
4821 gen_pop_T0(s);
4822 /* NOTE: order is important for pop %sp */
4823 gen_pop_update(s);
4824 gen_op_mov_reg_T0(ot, (b & 7) | REX_B(s));
4825 break;
4826 case 0x60: /* pusha */
4827 if (CODE64(s))
4828 goto illegal_op;
4829 gen_pusha(s);
4830 break;
4831 case 0x61: /* popa */
4832 if (CODE64(s))
4833 goto illegal_op;
4834 gen_popa(s);
4835 break;
4836 case 0x68: /* push Iv */
4837 case 0x6a:
4838 if (CODE64(s)) {
4839 ot = dflag ? OT_QUAD : OT_WORD;
4840 } else {
4841 ot = dflag + OT_WORD;
4843 if (b == 0x68)
4844 val = insn_get(s, ot);
4845 else
4846 val = (int8_t)insn_get(s, OT_BYTE);
4847 gen_op_movl_T0_im(val);
4848 gen_push_T0(s);
4849 break;
4850 case 0x8f: /* pop Ev */
4851 if (CODE64(s)) {
4852 ot = dflag ? OT_QUAD : OT_WORD;
4853 } else {
4854 ot = dflag + OT_WORD;
4856 modrm = ldub_code(s->pc++);
4857 mod = (modrm >> 6) & 3;
4858 gen_pop_T0(s);
4859 if (mod == 3) {
4860 /* NOTE: order is important for pop %sp */
4861 gen_pop_update(s);
4862 rm = (modrm & 7) | REX_B(s);
4863 gen_op_mov_reg_T0(ot, rm);
4864 } else {
4865 /* NOTE: order is important too for MMU exceptions */
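/* popl_esp_hack makes gen_lea_modrm bias ESP-based effective
   addresses by the pop size, since the destination address must be
   computed with ESP already incremented */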
4866 s->popl_esp_hack = 1 << ot;
4867 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
4868 s->popl_esp_hack = 0;
4869 gen_pop_update(s);
4871 break;
4872 case 0xc8: /* enter */
4874 int level;
4875 val = lduw_code(s->pc);
4876 s->pc += 2;
4877 level = ldub_code(s->pc++);
4878 gen_enter(s, val, level);
4880 break;
4881 case 0xc9: /* leave */
4882 /* XXX: exception not precise (ESP is updated before potential exception) */
4883 if (CODE64(s)) {
4884 gen_op_mov_TN_reg(OT_QUAD, 0, R_EBP);
4885 gen_op_mov_reg_T0(OT_QUAD, R_ESP);
4886 } else if (s->ss32) {
4887 gen_op_mov_TN_reg(OT_LONG, 0, R_EBP);
4888 gen_op_mov_reg_T0(OT_LONG, R_ESP);
4889 } else {
4890 gen_op_mov_TN_reg(OT_WORD, 0, R_EBP);
4891 gen_op_mov_reg_T0(OT_WORD, R_ESP);
4893 gen_pop_T0(s);
4894 if (CODE64(s)) {
4895 ot = dflag ? OT_QUAD : OT_WORD;
4896 } else {
4897 ot = dflag + OT_WORD;
4899 gen_op_mov_reg_T0(ot, R_EBP);
4900 gen_pop_update(s);
4901 break;
4902 case 0x06: /* push es */
4903 case 0x0e: /* push cs */
4904 case 0x16: /* push ss */
4905 case 0x1e: /* push ds */
4906 if (CODE64(s))
4907 goto illegal_op;
4908 gen_op_movl_T0_seg(b >> 3);
4909 gen_push_T0(s);
4910 break;
4911 case 0x1a0: /* push fs */
4912 case 0x1a8: /* push gs */
4913 gen_op_movl_T0_seg((b >> 3) & 7);
4914 gen_push_T0(s);
4915 break;
4916 case 0x07: /* pop es */
4917 case 0x17: /* pop ss */
4918 case 0x1f: /* pop ds */
4919 if (CODE64(s))
4920 goto illegal_op;
4921 reg = b >> 3;
4922 gen_pop_T0(s);
4923 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
4924 gen_pop_update(s);
4925 if (reg == R_SS) {
4926 /* if reg == SS, inhibit interrupts/trace. */
4927 /* If several instructions disable interrupts, only the
4928 _first_ does it */
4929 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
4930 gen_helper_set_inhibit_irq();
4931 s->tf = 0;
4933 if (s->is_jmp) {
4934 gen_jmp_im(s->pc - s->cs_base);
4935 gen_eob(s);
4937 break;
4938 case 0x1a1: /* pop fs */
4939 case 0x1a9: /* pop gs */
4940 gen_pop_T0(s);
4941 gen_movl_seg_T0(s, (b >> 3) & 7, pc_start - s->cs_base);
4942 gen_pop_update(s);
4943 if (s->is_jmp) {
4944 gen_jmp_im(s->pc - s->cs_base);
4945 gen_eob(s);
4947 break;
4949 /**************************/
4950 /* mov */
4951 case 0x88:
4952 case 0x89: /* mov Gv, Ev */
4953 if ((b & 1) == 0)
4954 ot = OT_BYTE;
4955 else
4956 ot = dflag + OT_WORD;
4957 modrm = ldub_code(s->pc++);
4958 reg = ((modrm >> 3) & 7) | rex_r;
4960 /* generate a generic store */
4961 gen_ldst_modrm(s, modrm, ot, reg, 1);
4962 break;
4963 case 0xc6:
4964 case 0xc7: /* mov Ev, Iv */
4965 if ((b & 1) == 0)
4966 ot = OT_BYTE;
4967 else
4968 ot = dflag + OT_WORD;
4969 modrm = ldub_code(s->pc++);
4970 mod = (modrm >> 6) & 3;
4971 if (mod != 3) {
4972 s->rip_offset = insn_const_size(ot);
4973 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
4975 val = insn_get(s, ot);
4976 gen_op_movl_T0_im(val);
4977 if (mod != 3)
4978 gen_op_st_T0_A0(ot + s->mem_index);
4979 else
4980 gen_op_mov_reg_T0(ot, (modrm & 7) | REX_B(s));
4981 break;
4982 case 0x8a:
4983 case 0x8b: /* mov Ev, Gv */
4984 if ((b & 1) == 0)
4985 ot = OT_BYTE;
4986 else
4987 ot = OT_WORD + dflag;
4988 modrm = ldub_code(s->pc++);
4989 reg = ((modrm >> 3) & 7) | rex_r;
4991 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
4992 gen_op_mov_reg_T0(ot, reg);
4993 break;
4994 case 0x8e: /* mov seg, Gv */
4995 modrm = ldub_code(s->pc++);
4996 reg = (modrm >> 3) & 7;
4997 if (reg >= 6 || reg == R_CS)
4998 goto illegal_op;
4999 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
5000 gen_movl_seg_T0(s, reg, pc_start - s->cs_base);
5001 if (reg == R_SS) {
5002 /* if reg == SS, inhibit interrupts/trace */
5003 /* If several instructions disable interrupts, only the
5004 _first_ does it */
5005 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
5006 gen_helper_set_inhibit_irq();
5007 s->tf = 0;
5009 if (s->is_jmp) {
5010 gen_jmp_im(s->pc - s->cs_base);
5011 gen_eob(s);
5013 break;
5014 case 0x8c: /* mov Gv, seg */
5015 modrm = ldub_code(s->pc++);
5016 reg = (modrm >> 3) & 7;
5017 mod = (modrm >> 6) & 3;
5018 if (reg >= 6)
5019 goto illegal_op;
5020 gen_op_movl_T0_seg(reg);
5021 if (mod == 3)
5022 ot = OT_WORD + dflag;
5023 else
5024 ot = OT_WORD;
5025 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
5026 break;
5028 case 0x1b6: /* movzbS Gv, Eb */
5029 case 0x1b7: /* movzwS Gv, Ew */
5030 case 0x1be: /* movsbS Gv, Eb */
5031 case 0x1bf: /* movswS Gv, Ew */
5033 int d_ot;
5034 /* d_ot is the size of the destination */
5035 d_ot = dflag + OT_WORD;
5036 /* ot is the size of the source */
5037 ot = (b & 1) + OT_BYTE;
5038 modrm = ldub_code(s->pc++);
5039 reg = ((modrm >> 3) & 7) | rex_r;
5040 mod = (modrm >> 6) & 3;
5041 rm = (modrm & 7) | REX_B(s);
5043 if (mod == 3) {
5044 gen_op_mov_TN_reg(ot, 0, rm);
5045 switch(ot | (b & 8)) {
5046 case OT_BYTE:
5047 tcg_gen_ext8u_tl(cpu_T[0], cpu_T[0]);
5048 break;
5049 case OT_BYTE | 8:
5050 tcg_gen_ext8s_tl(cpu_T[0], cpu_T[0]);
5051 break;
5052 case OT_WORD:
5053 tcg_gen_ext16u_tl(cpu_T[0], cpu_T[0]);
5054 break;
5055 default:
5056 case OT_WORD | 8:
5057 tcg_gen_ext16s_tl(cpu_T[0], cpu_T[0]);
5058 break;
5060 gen_op_mov_reg_T0(d_ot, reg);
5061 } else {
5062 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5063 if (b & 8) {
5064 gen_op_lds_T0_A0(ot + s->mem_index);
5065 } else {
5066 gen_op_ldu_T0_A0(ot + s->mem_index);
5068 gen_op_mov_reg_T0(d_ot, reg);
5071 break;
5073 case 0x8d: /* lea */
5074 ot = dflag + OT_WORD;
5075 modrm = ldub_code(s->pc++);
5076 mod = (modrm >> 6) & 3;
5077 if (mod == 3)
5078 goto illegal_op;
5079 reg = ((modrm >> 3) & 7) | rex_r;
5080 /* we must ensure that no segment is added */
5081 s->override = -1;
5082 val = s->addseg;
5083 s->addseg = 0;
5084 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5085 s->addseg = val;
5086 gen_op_mov_reg_A0(ot - OT_WORD, reg);
5087 break;
5089 case 0xa0: /* mov EAX, Ov */
5090 case 0xa1:
5091 case 0xa2: /* mov Ov, EAX */
5092 case 0xa3:
5094 target_ulong offset_addr;
5096 if ((b & 1) == 0)
5097 ot = OT_BYTE;
5098 else
5099 ot = dflag + OT_WORD;
5100 #ifdef TARGET_X86_64
5101 if (s->aflag == 2) {
5102 offset_addr = ldq_code(s->pc);
5103 s->pc += 8;
5104 gen_op_movq_A0_im(offset_addr);
5105 } else
5106 #endif
5108 if (s->aflag) {
5109 offset_addr = insn_get(s, OT_LONG);
5110 } else {
5111 offset_addr = insn_get(s, OT_WORD);
5113 gen_op_movl_A0_im(offset_addr);
5115 gen_add_A0_ds_seg(s);
5116 if ((b & 2) == 0) {
5117 gen_op_ld_T0_A0(ot + s->mem_index);
5118 gen_op_mov_reg_T0(ot, R_EAX);
5119 } else {
5120 gen_op_mov_TN_reg(ot, 0, R_EAX);
5121 gen_op_st_T0_A0(ot + s->mem_index);
5124 break;
5125 case 0xd7: /* xlat */
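/* xlat: AL = [seg:(E/R)BX + unsigned AL]; only the low 8 bits of
   EAX contribute to the address */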
5126 #ifdef TARGET_X86_64
5127 if (s->aflag == 2) {
5128 gen_op_movq_A0_reg(R_EBX);
5129 gen_op_mov_TN_reg(OT_QUAD, 0, R_EAX);
5130 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5131 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5132 } else
5133 #endif
5135 gen_op_movl_A0_reg(R_EBX);
5136 gen_op_mov_TN_reg(OT_LONG, 0, R_EAX);
5137 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], 0xff);
5138 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_T[0]);
5139 if (s->aflag == 0)
5140 gen_op_andl_A0_ffff();
5141 else
5142 tcg_gen_andi_tl(cpu_A0, cpu_A0, 0xffffffff);
5144 gen_add_A0_ds_seg(s);
5145 gen_op_ldu_T0_A0(OT_BYTE + s->mem_index);
5146 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
5147 break;
5148 case 0xb0 ... 0xb7: /* mov R, Ib */
5149 val = insn_get(s, OT_BYTE);
5150 gen_op_movl_T0_im(val);
5151 gen_op_mov_reg_T0(OT_BYTE, (b & 7) | REX_B(s));
5152 break;
5153 case 0xb8 ... 0xbf: /* mov R, Iv */
5154 #ifdef TARGET_X86_64
5155 if (dflag == 2) {
5156 uint64_t tmp;
5157 /* 64 bit case */
5158 tmp = ldq_code(s->pc);
5159 s->pc += 8;
5160 reg = (b & 7) | REX_B(s);
5161 gen_movtl_T0_im(tmp);
5162 gen_op_mov_reg_T0(OT_QUAD, reg);
5163 } else
5164 #endif
5166 ot = dflag ? OT_LONG : OT_WORD;
5167 val = insn_get(s, ot);
5168 reg = (b & 7) | REX_B(s);
5169 gen_op_movl_T0_im(val);
5170 gen_op_mov_reg_T0(ot, reg);
5172 break;
5174 case 0x91 ... 0x97: /* xchg R, EAX */
5175 ot = dflag + OT_WORD;
5176 reg = (b & 7) | REX_B(s);
5177 rm = R_EAX;
5178 goto do_xchg_reg;
5179 case 0x86:
5180 case 0x87: /* xchg Ev, Gv */
5181 if ((b & 1) == 0)
5182 ot = OT_BYTE;
5183 else
5184 ot = dflag + OT_WORD;
5185 modrm = ldub_code(s->pc++);
5186 reg = ((modrm >> 3) & 7) | rex_r;
5187 mod = (modrm >> 6) & 3;
5188 if (mod == 3) {
5189 rm = (modrm & 7) | REX_B(s);
5190 do_xchg_reg:
5191 gen_op_mov_TN_reg(ot, 0, reg);
5192 gen_op_mov_TN_reg(ot, 1, rm);
5193 gen_op_mov_reg_T0(ot, rm);
5194 gen_op_mov_reg_T1(ot, reg);
5195 } else {
5196 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5197 gen_op_mov_TN_reg(ot, 0, reg);
5198 /* for xchg, lock is implicit */
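/* when the LOCK prefix is present, gen_helper_lock() was already
   emitted with the other prefixes above, so it is only generated
   here for the implicitly locked case */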
5199 if (!(prefixes & PREFIX_LOCK))
5200 gen_helper_lock();
5201 gen_op_ld_T1_A0(ot + s->mem_index);
5202 gen_op_st_T0_A0(ot + s->mem_index);
5203 if (!(prefixes & PREFIX_LOCK))
5204 gen_helper_unlock();
5205 gen_op_mov_reg_T1(ot, reg);
5207 break;
5208 case 0xc4: /* les Gv */
5209 if (CODE64(s))
5210 goto illegal_op;
5211 op = R_ES;
5212 goto do_lxx;
5213 case 0xc5: /* lds Gv */
5214 if (CODE64(s))
5215 goto illegal_op;
5216 op = R_DS;
5217 goto do_lxx;
5218 case 0x1b2: /* lss Gv */
5219 op = R_SS;
5220 goto do_lxx;
5221 case 0x1b4: /* lfs Gv */
5222 op = R_FS;
5223 goto do_lxx;
5224 case 0x1b5: /* lgs Gv */
5225 op = R_GS;
5226 do_lxx:
5227 ot = dflag ? OT_LONG : OT_WORD;
5228 modrm = ldub_code(s->pc++);
5229 reg = ((modrm >> 3) & 7) | rex_r;
5230 mod = (modrm >> 6) & 3;
5231 if (mod == 3)
5232 goto illegal_op;
5233 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5234 gen_op_ld_T1_A0(ot + s->mem_index);
5235 gen_add_A0_im(s, 1 << (ot - OT_WORD + 1));
5236 /* load the segment first to handle exceptions properly */
5237 gen_op_ldu_T0_A0(OT_WORD + s->mem_index);
5238 gen_movl_seg_T0(s, op, pc_start - s->cs_base);
5239 /* then put the data */
5240 gen_op_mov_reg_T1(ot, reg);
5241 if (s->is_jmp) {
5242 gen_jmp_im(s->pc - s->cs_base);
5243 gen_eob(s);
5245 break;
5247 /************************/
5248 /* shifts */
5249 case 0xc0:
5250 case 0xc1:
5251 /* shift Ev,Ib */
5252 shift = 2;
5253 grp2:
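/* shift encodes where the count comes from: 0 = CL, 1 = the
   constant 1, 2 = an immediate byte (which is also why rip_offset
   is set below before the effective address is computed) */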
5255 if ((b & 1) == 0)
5256 ot = OT_BYTE;
5257 else
5258 ot = dflag + OT_WORD;
5260 modrm = ldub_code(s->pc++);
5261 mod = (modrm >> 6) & 3;
5262 op = (modrm >> 3) & 7;
5264 if (mod != 3) {
5265 if (shift == 2) {
5266 s->rip_offset = 1;
5268 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5269 opreg = OR_TMP0;
5270 } else {
5271 opreg = (modrm & 7) | REX_B(s);
5274 /* simpler op */
5275 if (shift == 0) {
5276 gen_shift(s, op, ot, opreg, OR_ECX);
5277 } else {
5278 if (shift == 2) {
5279 shift = ldub_code(s->pc++);
5281 gen_shifti(s, op, ot, opreg, shift);
5284 break;
5285 case 0xd0:
5286 case 0xd1:
5287 /* shift Ev,1 */
5288 shift = 1;
5289 goto grp2;
5290 case 0xd2:
5291 case 0xd3:
5292 /* shift Ev,cl */
5293 shift = 0;
5294 goto grp2;
5296 case 0x1a4: /* shld imm */
5297 op = 0;
5298 shift = 1;
5299 goto do_shiftd;
5300 case 0x1a5: /* shld cl */
5301 op = 0;
5302 shift = 0;
5303 goto do_shiftd;
5304 case 0x1ac: /* shrd imm */
5305 op = 1;
5306 shift = 1;
5307 goto do_shiftd;
5308 case 0x1ad: /* shrd cl */
5309 op = 1;
5310 shift = 0;
5311 do_shiftd:
5312 ot = dflag + OT_WORD;
5313 modrm = ldub_code(s->pc++);
5314 mod = (modrm >> 6) & 3;
5315 rm = (modrm & 7) | REX_B(s);
5316 reg = ((modrm >> 3) & 7) | rex_r;
5317 if (mod != 3) {
5318 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5319 opreg = OR_TMP0;
5320 } else {
5321 opreg = rm;
5323 gen_op_mov_TN_reg(ot, 1, reg);
5325 if (shift) {
5326 val = ldub_code(s->pc++);
5327 tcg_gen_movi_tl(cpu_T3, val);
5328 } else {
5329 tcg_gen_ld_tl(cpu_T3, cpu_env, offsetof(CPUState, regs[R_ECX]));
5331 gen_shiftd_rm_T1_T3(s, ot, opreg, op);
5332 break;
5334 /************************/
5335 /* floats */
5336 case 0xd8 ... 0xdf:
5337 if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
5338 /* if CR0.EM or CR0.TS are set, generate an FPU exception */
5339 /* XXX: what to do if illegal op ? */
5340 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
5341 break;
5343 modrm = ldub_code(s->pc++);
5344 mod = (modrm >> 6) & 3;
5345 rm = modrm & 7;
5346 op = ((b & 7) << 3) | ((modrm >> 3) & 7);
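/* op packs the FPU opcode: bits 5..3 are the low three bits of the
   D8..DF opcode byte, bits 2..0 the ModRM reg field; this 6-bit
   index drives the switches below (op >> 4 selects the operand
   size group for the memory forms) */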
5347 if (mod != 3) {
5348 /* memory op */
5349 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
5350 switch(op) {
5351 case 0x00 ... 0x07: /* fxxxs */
5352 case 0x10 ... 0x17: /* fixxxl */
5353 case 0x20 ... 0x27: /* fxxxl */
5354 case 0x30 ... 0x37: /* fixxx */
5356 int op1;
5357 op1 = op & 7;
5359 switch(op >> 4) {
5360 case 0:
5361 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5362 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5363 gen_helper_flds_FT0(cpu_tmp2_i32);
5364 break;
5365 case 1:
5366 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5367 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5368 gen_helper_fildl_FT0(cpu_tmp2_i32);
5369 break;
5370 case 2:
5371 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5372 (s->mem_index >> 2) - 1);
5373 gen_helper_fldl_FT0(cpu_tmp1_i64);
5374 break;
5375 case 3:
5376 default:
5377 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5378 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5379 gen_helper_fildl_FT0(cpu_tmp2_i32);
5380 break;
5383 gen_helper_fp_arith_ST0_FT0(op1);
5384 if (op1 == 3) {
5385 /* fcomp needs pop */
5386 gen_helper_fpop();
5389 break;
5390 case 0x08: /* flds */
5391 case 0x0a: /* fsts */
5392 case 0x0b: /* fstps */
5393 case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
5394 case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
5395 case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
5396 switch(op & 7) {
5397 case 0:
5398 switch(op >> 4) {
5399 case 0:
5400 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5401 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5402 gen_helper_flds_ST0(cpu_tmp2_i32);
5403 break;
5404 case 1:
5405 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
5406 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5407 gen_helper_fildl_ST0(cpu_tmp2_i32);
5408 break;
5409 case 2:
5410 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5411 (s->mem_index >> 2) - 1);
5412 gen_helper_fldl_ST0(cpu_tmp1_i64);
5413 break;
5414 case 3:
5415 default:
5416 gen_op_lds_T0_A0(OT_WORD + s->mem_index);
5417 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5418 gen_helper_fildl_ST0(cpu_tmp2_i32);
5419 break;
5421 break;
5422 case 1:
5423 /* XXX: the corresponding CPUID bit must be tested ! */
5424 switch(op >> 4) {
5425 case 1:
5426 gen_helper_fisttl_ST0(cpu_tmp2_i32);
5427 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5428 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5429 break;
5430 case 2:
5431 gen_helper_fisttll_ST0(cpu_tmp1_i64);
5432 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5433 (s->mem_index >> 2) - 1);
5434 break;
5435 case 3:
5436 default:
5437 gen_helper_fistt_ST0(cpu_tmp2_i32);
5438 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5439 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5440 break;
5442 gen_helper_fpop();
5443 break;
5444 default:
5445 switch(op >> 4) {
5446 case 0:
5447 gen_helper_fsts_ST0(cpu_tmp2_i32);
5448 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5449 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5450 break;
5451 case 1:
5452 gen_helper_fistl_ST0(cpu_tmp2_i32);
5453 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5454 gen_op_st_T0_A0(OT_LONG + s->mem_index);
5455 break;
5456 case 2:
5457 gen_helper_fstl_ST0(cpu_tmp1_i64);
5458 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5459 (s->mem_index >> 2) - 1);
5460 break;
5461 case 3:
5462 default:
5463 gen_helper_fist_ST0(cpu_tmp2_i32);
5464 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5465 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5466 break;
5468 if ((op & 7) == 3)
5469 gen_helper_fpop();
5470 break;
5472 break;
5473 case 0x0c: /* fldenv mem */
5474 if (s->cc_op != CC_OP_DYNAMIC)
5475 gen_op_set_cc_op(s->cc_op);
5476 gen_jmp_im(pc_start - s->cs_base);
5477 gen_helper_fldenv(cpu_A0, tcg_const_i32(s->dflag));
5479 break;
5480 case 0x0d: /* fldcw mem */
5481 gen_op_ld_T0_A0(OT_WORD + s->mem_index);
5482 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5483 gen_helper_fldcw(cpu_tmp2_i32);
5484 break;
5485 case 0x0e: /* fnstenv mem */
5486 if (s->cc_op != CC_OP_DYNAMIC)
5487 gen_op_set_cc_op(s->cc_op);
5488 gen_jmp_im(pc_start - s->cs_base);
5489 gen_helper_fstenv(cpu_A0, tcg_const_i32(s->dflag));
5490 break;
5491 case 0x0f: /* fnstcw mem */
5492 gen_helper_fnstcw(cpu_tmp2_i32);
5493 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5494 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5495 break;
5496 case 0x1d: /* fldt mem */
5497 if (s->cc_op != CC_OP_DYNAMIC)
5498 gen_op_set_cc_op(s->cc_op);
5499 gen_jmp_im(pc_start - s->cs_base);
5500 gen_helper_fldt_ST0(cpu_A0);
5501 break;
5502 case 0x1f: /* fstpt mem */
5503 if (s->cc_op != CC_OP_DYNAMIC)
5504 gen_op_set_cc_op(s->cc_op);
5505 gen_jmp_im(pc_start - s->cs_base);
5506 gen_helper_fstt_ST0(cpu_A0);
5507 gen_helper_fpop();
5508 break;
5509 case 0x2c: /* frstor mem */
5510 if (s->cc_op != CC_OP_DYNAMIC)
5511 gen_op_set_cc_op(s->cc_op);
5512 gen_jmp_im(pc_start - s->cs_base);
5513 gen_helper_frstor(cpu_A0, tcg_const_i32(s->dflag));
5514 break;
5515 case 0x2e: /* fnsave mem */
5516 if (s->cc_op != CC_OP_DYNAMIC)
5517 gen_op_set_cc_op(s->cc_op);
5518 gen_jmp_im(pc_start - s->cs_base);
5519 gen_helper_fsave(cpu_A0, tcg_const_i32(s->dflag));
5520 break;
5521 case 0x2f: /* fnstsw mem */
5522 gen_helper_fnstsw(cpu_tmp2_i32);
5523 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5524 gen_op_st_T0_A0(OT_WORD + s->mem_index);
5525 break;
5526 case 0x3c: /* fbld */
5527 if (s->cc_op != CC_OP_DYNAMIC)
5528 gen_op_set_cc_op(s->cc_op);
5529 gen_jmp_im(pc_start - s->cs_base);
5530 gen_helper_fbld_ST0(cpu_A0);
5531 break;
5532 case 0x3e: /* fbstp */
5533 if (s->cc_op != CC_OP_DYNAMIC)
5534 gen_op_set_cc_op(s->cc_op);
5535 gen_jmp_im(pc_start - s->cs_base);
5536 gen_helper_fbst_ST0(cpu_A0);
5537 gen_helper_fpop();
5538 break;
5539 case 0x3d: /* fildll */
5540 tcg_gen_qemu_ld64(cpu_tmp1_i64, cpu_A0,
5541 (s->mem_index >> 2) - 1);
5542 gen_helper_fildll_ST0(cpu_tmp1_i64);
5543 break;
5544 case 0x3f: /* fistpll */
5545 gen_helper_fistll_ST0(cpu_tmp1_i64);
5546 tcg_gen_qemu_st64(cpu_tmp1_i64, cpu_A0,
5547 (s->mem_index >> 2) - 1);
5548 gen_helper_fpop();
5549 break;
5550 default:
5551 goto illegal_op;
5553 } else {
5554 /* register float ops */
5555 opreg = rm;
5557 switch(op) {
5558 case 0x08: /* fld sti */
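/* fpush moved the stack top down, so the old ST(i) is now ST(i+1) */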
5559 gen_helper_fpush();
5560 gen_helper_fmov_ST0_STN(tcg_const_i32((opreg + 1) & 7));
5561 break;
5562 case 0x09: /* fxchg sti */
5563 case 0x29: /* fxchg4 sti, undocumented op */
5564 case 0x39: /* fxchg7 sti, undocumented op */
5565 gen_helper_fxchg_ST0_STN(tcg_const_i32(opreg));
5566 break;
5567 case 0x0a: /* grp d9/2 */
5568 switch(rm) {
5569 case 0: /* fnop */
5570 /* check exceptions (FreeBSD FPU probe) */
5571 if (s->cc_op != CC_OP_DYNAMIC)
5572 gen_op_set_cc_op(s->cc_op);
5573 gen_jmp_im(pc_start - s->cs_base);
5574 gen_helper_fwait();
5575 break;
5576 default:
5577 goto illegal_op;
5579 break;
5580 case 0x0c: /* grp d9/4 */
5581 switch(rm) {
5582 case 0: /* fchs */
5583 gen_helper_fchs_ST0();
5584 break;
5585 case 1: /* fabs */
5586 gen_helper_fabs_ST0();
5587 break;
5588 case 4: /* ftst */
5589 gen_helper_fldz_FT0();
5590 gen_helper_fcom_ST0_FT0();
5591 break;
5592 case 5: /* fxam */
5593 gen_helper_fxam_ST0();
5594 break;
5595 default:
5596 goto illegal_op;
5598 break;
5599 case 0x0d: /* grp d9/5 */
5601 switch(rm) {
5602 case 0:
5603 gen_helper_fpush();
5604 gen_helper_fld1_ST0();
5605 break;
5606 case 1:
5607 gen_helper_fpush();
5608 gen_helper_fldl2t_ST0();
5609 break;
5610 case 2:
5611 gen_helper_fpush();
5612 gen_helper_fldl2e_ST0();
5613 break;
5614 case 3:
5615 gen_helper_fpush();
5616 gen_helper_fldpi_ST0();
5617 break;
5618 case 4:
5619 gen_helper_fpush();
5620 gen_helper_fldlg2_ST0();
5621 break;
5622 case 5:
5623 gen_helper_fpush();
5624 gen_helper_fldln2_ST0();
5625 break;
5626 case 6:
5627 gen_helper_fpush();
5628 gen_helper_fldz_ST0();
5629 break;
5630 default:
5631 goto illegal_op;
5634 break;
5635 case 0x0e: /* grp d9/6 */
5636 switch(rm) {
5637 case 0: /* f2xm1 */
5638 gen_helper_f2xm1();
5639 break;
5640 case 1: /* fyl2x */
5641 gen_helper_fyl2x();
5642 break;
5643 case 2: /* fptan */
5644 gen_helper_fptan();
5645 break;
5646 case 3: /* fpatan */
5647 gen_helper_fpatan();
5648 break;
5649 case 4: /* fxtract */
5650 gen_helper_fxtract();
5651 break;
5652 case 5: /* fprem1 */
5653 gen_helper_fprem1();
5654 break;
5655 case 6: /* fdecstp */
5656 gen_helper_fdecstp();
5657 break;
5658 default:
5659 case 7: /* fincstp */
5660 gen_helper_fincstp();
5661 break;
5663 break;
5664 case 0x0f: /* grp d9/7 */
5665 switch(rm) {
5666 case 0: /* fprem */
5667 gen_helper_fprem();
5668 break;
5669 case 1: /* fyl2xp1 */
5670 gen_helper_fyl2xp1();
5671 break;
5672 case 2: /* fsqrt */
5673 gen_helper_fsqrt();
5674 break;
5675 case 3: /* fsincos */
5676 gen_helper_fsincos();
5677 break;
5678 case 5: /* fscale */
5679 gen_helper_fscale();
5680 break;
5681 case 4: /* frndint */
5682 gen_helper_frndint();
5683 break;
5684 case 6: /* fsin */
5685 gen_helper_fsin();
5686 break;
5687 default:
5688 case 7: /* fcos */
5689 gen_helper_fcos();
5690 break;
5692 break;
5693 case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
5694 case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
5695 case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
5697 int op1;
5699 op1 = op & 7;
5700 if (op >= 0x20) {
5701 gen_helper_fp_arith_STN_ST0(op1, opreg);
5702 if (op >= 0x30)
5703 gen_helper_fpop();
5704 } else {
5705 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5706 gen_helper_fp_arith_ST0_FT0(op1);
5709 break;
5710 case 0x02: /* fcom */
5711 case 0x22: /* fcom2, undocumented op */
5712 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5713 gen_helper_fcom_ST0_FT0();
5714 break;
5715 case 0x03: /* fcomp */
5716 case 0x23: /* fcomp3, undocumented op */
5717 case 0x32: /* fcomp5, undocumented op */
5718 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5719 gen_helper_fcom_ST0_FT0();
5720 gen_helper_fpop();
5721 break;
5722 case 0x15: /* da/5 */
5723 switch(rm) {
5724 case 1: /* fucompp */
5725 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5726 gen_helper_fucom_ST0_FT0();
5727 gen_helper_fpop();
5728 gen_helper_fpop();
5729 break;
5730 default:
5731 goto illegal_op;
5733 break;
5734 case 0x1c:
5735 switch(rm) {
5736 case 0: /* feni (287 only, just do nop here) */
5737 break;
5738 case 1: /* fdisi (287 only, just do nop here) */
5739 break;
5740 case 2: /* fclex */
5741 gen_helper_fclex();
5742 break;
5743 case 3: /* fninit */
5744 gen_helper_fninit();
5745 break;
5746 case 4: /* fsetpm (287 only, just do nop here) */
5747 break;
5748 default:
5749 goto illegal_op;
5751 break;
5752 case 0x1d: /* fucomi */
5753 if (s->cc_op != CC_OP_DYNAMIC)
5754 gen_op_set_cc_op(s->cc_op);
5755 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5756 gen_helper_fucomi_ST0_FT0();
5757 s->cc_op = CC_OP_EFLAGS;
5758 break;
5759 case 0x1e: /* fcomi */
5760 if (s->cc_op != CC_OP_DYNAMIC)
5761 gen_op_set_cc_op(s->cc_op);
5762 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5763 gen_helper_fcomi_ST0_FT0();
5764 s->cc_op = CC_OP_EFLAGS;
5765 break;
5766 case 0x28: /* ffree sti */
5767 gen_helper_ffree_STN(tcg_const_i32(opreg));
5768 break;
5769 case 0x2a: /* fst sti */
5770 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5771 break;
5772 case 0x2b: /* fstp sti */
5773 case 0x0b: /* fstp1 sti, undocumented op */
5774 case 0x3a: /* fstp8 sti, undocumented op */
5775 case 0x3b: /* fstp9 sti, undocumented op */
5776 gen_helper_fmov_STN_ST0(tcg_const_i32(opreg));
5777 gen_helper_fpop();
5778 break;
5779 case 0x2c: /* fucom st(i) */
5780 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5781 gen_helper_fucom_ST0_FT0();
5782 break;
5783 case 0x2d: /* fucomp st(i) */
5784 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5785 gen_helper_fucom_ST0_FT0();
5786 gen_helper_fpop();
5787 break;
5788 case 0x33: /* de/3 */
5789 switch(rm) {
5790 case 1: /* fcompp */
5791 gen_helper_fmov_FT0_STN(tcg_const_i32(1));
5792 gen_helper_fcom_ST0_FT0();
5793 gen_helper_fpop();
5794 gen_helper_fpop();
5795 break;
5796 default:
5797 goto illegal_op;
5799 break;
5800 case 0x38: /* ffreep sti, undocumented op */
5801 gen_helper_ffree_STN(tcg_const_i32(opreg));
5802 gen_helper_fpop();
5803 break;
5804 case 0x3c: /* df/4 */
5805 switch(rm) {
5806 case 0:
5807 gen_helper_fnstsw(cpu_tmp2_i32);
5808 tcg_gen_extu_i32_tl(cpu_T[0], cpu_tmp2_i32);
5809 gen_op_mov_reg_T0(OT_WORD, R_EAX);
5810 break;
5811 default:
5812 goto illegal_op;
5814 break;
5815 case 0x3d: /* fucomip */
5816 if (s->cc_op != CC_OP_DYNAMIC)
5817 gen_op_set_cc_op(s->cc_op);
5818 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5819 gen_helper_fucomi_ST0_FT0();
5820 gen_helper_fpop();
5821 s->cc_op = CC_OP_EFLAGS;
5822 break;
5823 case 0x3e: /* fcomip */
5824 if (s->cc_op != CC_OP_DYNAMIC)
5825 gen_op_set_cc_op(s->cc_op);
5826 gen_helper_fmov_FT0_STN(tcg_const_i32(opreg));
5827 gen_helper_fcomi_ST0_FT0();
5828 gen_helper_fpop();
5829 s->cc_op = CC_OP_EFLAGS;
5830 break;
5831 case 0x10 ... 0x13: /* fcmovxx */
5832 case 0x18 ... 0x1b:
5834 int op1, l1;
5835 static const uint8_t fcmov_cc[8] = {
5836 (JCC_B << 1),
5837 (JCC_Z << 1),
5838 (JCC_BE << 1),
5839 (JCC_P << 1),
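/* gen_jcc1 jumps past the fmov when the move condition is false:
   the low bit of op1 inverts the table entry, so fcmovb..u
   (0x10..0x13) test the negated condition and fcmovnb..nu
   (0x18..0x1b) the plain one */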
5841 op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
5842 l1 = gen_new_label();
5843 gen_jcc1(s, s->cc_op, op1, l1);
5844 gen_helper_fmov_ST0_STN(tcg_const_i32(opreg));
5845 gen_set_label(l1);
5847 break;
5848 default:
5849 goto illegal_op;
5852 break;
5853 /************************/
5854 /* string ops */
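/* the repz/repnz variants receive the instruction's own EIP and the
   next EIP: the generated code performs one iteration, then loops by
   jumping back to the instruction itself, exiting to next_eip once
   ECX reaches zero */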
5856 case 0xa4: /* movsS */
5857 case 0xa5:
5858 if ((b & 1) == 0)
5859 ot = OT_BYTE;
5860 else
5861 ot = dflag + OT_WORD;
5863 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5864 gen_repz_movs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5865 } else {
5866 gen_movs(s, ot);
5868 break;
5870 case 0xaa: /* stosS */
5871 case 0xab:
5872 if ((b & 1) == 0)
5873 ot = OT_BYTE;
5874 else
5875 ot = dflag + OT_WORD;
5877 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5878 gen_repz_stos(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5879 } else {
5880 gen_stos(s, ot);
5882 break;
5883 case 0xac: /* lodsS */
5884 case 0xad:
5885 if ((b & 1) == 0)
5886 ot = OT_BYTE;
5887 else
5888 ot = dflag + OT_WORD;
5889 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5890 gen_repz_lods(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5891 } else {
5892 gen_lods(s, ot);
5894 break;
5895 case 0xae: /* scasS */
5896 case 0xaf:
5897 if ((b & 1) == 0)
5898 ot = OT_BYTE;
5899 else
5900 ot = dflag + OT_WORD;
5901 if (prefixes & PREFIX_REPNZ) {
5902 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5903 } else if (prefixes & PREFIX_REPZ) {
5904 gen_repz_scas(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5905 } else {
5906 gen_scas(s, ot);
5907 s->cc_op = CC_OP_SUBB + ot;
5909 break;
5911 case 0xa6: /* cmpsS */
5912 case 0xa7:
5913 if ((b & 1) == 0)
5914 ot = OT_BYTE;
5915 else
5916 ot = dflag + OT_WORD;
5917 if (prefixes & PREFIX_REPNZ) {
5918 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 1);
5919 } else if (prefixes & PREFIX_REPZ) {
5920 gen_repz_cmps(s, ot, pc_start - s->cs_base, s->pc - s->cs_base, 0);
5921 } else {
5922 gen_cmps(s, ot);
5923 s->cc_op = CC_OP_SUBB + ot;
5925 break;
5926 case 0x6c: /* insS */
5927 case 0x6d:
5928 if ((b & 1) == 0)
5929 ot = OT_BYTE;
5930 else
5931 ot = dflag ? OT_LONG : OT_WORD;
5932 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5933 gen_op_andl_T0_ffff();
5934 gen_check_io(s, ot, pc_start - s->cs_base,
5935 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes) | 4);
5936 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5937 gen_repz_ins(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5938 } else {
5939 gen_ins(s, ot);
5940 if (use_icount) {
5941 gen_jmp(s, s->pc - s->cs_base);
5944 break;
5945 case 0x6e: /* outsS */
5946 case 0x6f:
5947 if ((b & 1) == 0)
5948 ot = OT_BYTE;
5949 else
5950 ot = dflag ? OT_LONG : OT_WORD;
5951 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
5952 gen_op_andl_T0_ffff();
5953 gen_check_io(s, ot, pc_start - s->cs_base,
5954 svm_is_rep(prefixes) | 4);
5955 if (prefixes & (PREFIX_REPZ | PREFIX_REPNZ)) {
5956 gen_repz_outs(s, ot, pc_start - s->cs_base, s->pc - s->cs_base);
5957 } else {
5958 gen_outs(s, ot);
5959 if (use_icount) {
5960 gen_jmp(s, s->pc - s->cs_base);
5963 break;
5965 /************************/
5966 /* port I/O */
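/* gen_check_io raises #GP when CPL > IOPL (or in vm86 mode) and the
   TSS I/O permission bitmap denies the port, and also emits the SVM
   I/O intercept check; with icount enabled the access is bracketed
   by gen_io_start()/gen_io_end() and the TB is ended so instruction
   counting stays exact around I/O */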
5968 case 0xe4:
5969 case 0xe5:
5970 if ((b & 1) == 0)
5971 ot = OT_BYTE;
5972 else
5973 ot = dflag ? OT_LONG : OT_WORD;
5974 val = ldub_code(s->pc++);
5975 gen_op_movl_T0_im(val);
5976 gen_check_io(s, ot, pc_start - s->cs_base,
5977 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
5978 if (use_icount)
5979 gen_io_start();
5980 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
5981 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
5982 gen_op_mov_reg_T1(ot, R_EAX);
5983 if (use_icount) {
5984 gen_io_end();
5985 gen_jmp(s, s->pc - s->cs_base);
5987 break;
5988 case 0xe6:
5989 case 0xe7:
5990 if ((b & 1) == 0)
5991 ot = OT_BYTE;
5992 else
5993 ot = dflag ? OT_LONG : OT_WORD;
5994 val = ldub_code(s->pc++);
5995 gen_op_movl_T0_im(val);
5996 gen_check_io(s, ot, pc_start - s->cs_base,
5997 svm_is_rep(prefixes));
5998 gen_op_mov_TN_reg(ot, 1, R_EAX);
6000 if (use_icount)
6001 gen_io_start();
6002 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6003 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6004 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6005 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6006 if (use_icount) {
6007 gen_io_end();
6008 gen_jmp(s, s->pc - s->cs_base);
6010 break;
6011 case 0xec:
6012 case 0xed:
6013 if ((b & 1) == 0)
6014 ot = OT_BYTE;
6015 else
6016 ot = dflag ? OT_LONG : OT_WORD;
6017 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6018 gen_op_andl_T0_ffff();
6019 gen_check_io(s, ot, pc_start - s->cs_base,
6020 SVM_IOIO_TYPE_MASK | svm_is_rep(prefixes));
6021 if (use_icount)
6022 gen_io_start();
6023 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6024 gen_helper_in_func(ot, cpu_T[1], cpu_tmp2_i32);
6025 gen_op_mov_reg_T1(ot, R_EAX);
6026 if (use_icount) {
6027 gen_io_end();
6028 gen_jmp(s, s->pc - s->cs_base);
6030 break;
6031 case 0xee:
6032 case 0xef:
6033 if ((b & 1) == 0)
6034 ot = OT_BYTE;
6035 else
6036 ot = dflag ? OT_LONG : OT_WORD;
6037 gen_op_mov_TN_reg(OT_WORD, 0, R_EDX);
6038 gen_op_andl_T0_ffff();
6039 gen_check_io(s, ot, pc_start - s->cs_base,
6040 svm_is_rep(prefixes));
6041 gen_op_mov_TN_reg(ot, 1, R_EAX);
6043 if (use_icount)
6044 gen_io_start();
6045 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6046 tcg_gen_andi_i32(cpu_tmp2_i32, cpu_tmp2_i32, 0xffff);
6047 tcg_gen_trunc_tl_i32(cpu_tmp3_i32, cpu_T[1]);
6048 gen_helper_out_func(ot, cpu_tmp2_i32, cpu_tmp3_i32);
6049 if (use_icount) {
6050 gen_io_end();
6051 gen_jmp(s, s->pc - s->cs_base);
6053 break;
6055 /************************/
6056 /* control */
6057 case 0xc2: /* ret im */
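/* pop the return address, then release it (2 << dflag bytes) and
   the immediate operand in a single ESP adjustment */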
6058 val = ldsw_code(s->pc);
6059 s->pc += 2;
6060 gen_pop_T0(s);
6061 if (CODE64(s) && s->dflag)
6062 s->dflag = 2;
6063 gen_stack_update(s, val + (2 << s->dflag));
6064 if (s->dflag == 0)
6065 gen_op_andl_T0_ffff();
6066 gen_op_jmp_T0();
6067 gen_eob(s);
6068 break;
6069 case 0xc3: /* ret */
6070 gen_pop_T0(s);
6071 gen_pop_update(s);
6072 if (s->dflag == 0)
6073 gen_op_andl_T0_ffff();
6074 gen_op_jmp_T0();
6075 gen_eob(s);
6076 break;
6077 case 0xca: /* lret im */
6078 val = ldsw_code(s->pc);
6079 s->pc += 2;
6080 do_lret:
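/* a protected-mode far return may change privilege level, so it is
   done by a helper; otherwise offset and selector are popped inline
   and CS is reloaded as a raw selector */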
6081 if (s->pe && !s->vm86) {
6082 if (s->cc_op != CC_OP_DYNAMIC)
6083 gen_op_set_cc_op(s->cc_op);
6084 gen_jmp_im(pc_start - s->cs_base);
6085 gen_helper_lret_protected(tcg_const_i32(s->dflag),
6086 tcg_const_i32(val));
6087 } else {
6088 gen_stack_A0(s);
6089 /* pop offset */
6090 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6091 if (s->dflag == 0)
6092 gen_op_andl_T0_ffff();
6093 /* NOTE: keeping EIP updated is not a problem in case of
6094 exception */
6095 gen_op_jmp_T0();
6096 /* pop selector */
6097 gen_op_addl_A0_im(2 << s->dflag);
6098 gen_op_ld_T0_A0(1 + s->dflag + s->mem_index);
6099 gen_op_movl_seg_T0_vm(R_CS);
6100 /* add stack offset */
6101 gen_stack_update(s, val + (4 << s->dflag));
6103 gen_eob(s);
6104 break;
6105 case 0xcb: /* lret */
6106 val = 0;
6107 goto do_lret;
6108 case 0xcf: /* iret */
6109 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IRET);
6110 if (!s->pe) {
6111 /* real mode */
6112 gen_helper_iret_real(tcg_const_i32(s->dflag));
6113 s->cc_op = CC_OP_EFLAGS;
6114 } else if (s->vm86) {
6115 if (s->iopl != 3) {
6116 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6117 } else {
6118 gen_helper_iret_real(tcg_const_i32(s->dflag));
6119 s->cc_op = CC_OP_EFLAGS;
6121 } else {
6122 if (s->cc_op != CC_OP_DYNAMIC)
6123 gen_op_set_cc_op(s->cc_op);
6124 gen_jmp_im(pc_start - s->cs_base);
6125 gen_helper_iret_protected(tcg_const_i32(s->dflag),
6126 tcg_const_i32(s->pc - s->cs_base));
6127 s->cc_op = CC_OP_EFLAGS;
6129 gen_eob(s);
6130 break;
6131 case 0xe8: /* call im */
6133 if (dflag)
6134 tval = (int32_t)insn_get(s, OT_LONG);
6135 else
6136 tval = (int16_t)insn_get(s, OT_WORD);
6137 next_eip = s->pc - s->cs_base;
6138 tval += next_eip;
6139 if (s->dflag == 0)
6140 tval &= 0xffff;
6141 gen_movtl_T0_im(next_eip);
6142 gen_push_T0(s);
6143 gen_jmp(s, tval);
6145 break;
6146 case 0x9a: /* lcall im */
6148 unsigned int selector, offset;
6150 if (CODE64(s))
6151 goto illegal_op;
6152 ot = dflag ? OT_LONG : OT_WORD;
6153 offset = insn_get(s, ot);
6154 selector = insn_get(s, OT_WORD);
6156 gen_op_movl_T0_im(selector);
6157 gen_op_movl_T1_imu(offset);
6159 goto do_lcall;
6160 case 0xe9: /* jmp im */
6161 if (dflag)
6162 tval = (int32_t)insn_get(s, OT_LONG);
6163 else
6164 tval = (int16_t)insn_get(s, OT_WORD);
6165 tval += s->pc - s->cs_base;
6166 if (s->dflag == 0)
6167 tval &= 0xffff;
6168 else if (!CODE64(s))
6169 tval &= 0xffffffff;
6170 gen_jmp(s, tval);
6171 break;
6172 case 0xea: /* ljmp im */
6174 unsigned int selector, offset;
6176 if (CODE64(s))
6177 goto illegal_op;
6178 ot = dflag ? OT_LONG : OT_WORD;
6179 offset = insn_get(s, ot);
6180 selector = insn_get(s, OT_WORD);
6182 gen_op_movl_T0_im(selector);
6183 gen_op_movl_T1_imu(offset);
6185 goto do_ljmp;
6186 case 0xeb: /* jmp Jb */
6187 tval = (int8_t)insn_get(s, OT_BYTE);
6188 tval += s->pc - s->cs_base;
6189 if (s->dflag == 0)
6190 tval &= 0xffff;
6191 gen_jmp(s, tval);
6192 break;
6193 case 0x70 ... 0x7f: /* jcc Jb */
6194 tval = (int8_t)insn_get(s, OT_BYTE);
6195 goto do_jcc;
6196 case 0x180 ... 0x18f: /* jcc Jv */
6197 if (dflag) {
6198 tval = (int32_t)insn_get(s, OT_LONG);
6199 } else {
6200 tval = (int16_t)insn_get(s, OT_WORD);
6202 do_jcc:
6203 next_eip = s->pc - s->cs_base;
6204 tval += next_eip;
6205 if (s->dflag == 0)
6206 tval &= 0xffff;
6207 gen_jcc(s, b, tval, next_eip);
6208 break;
6210 case 0x190 ... 0x19f: /* setcc Gv */
6211 modrm = ldub_code(s->pc++);
6212 gen_setcc(s, b);
6213 gen_ldst_modrm(s, modrm, OT_BYTE, OR_TMP0, 1);
6214 break;
6215 case 0x140 ... 0x14f: /* cmov Gv, Ev */
6217 int l1;
6218 TCGv t0;
6220 ot = dflag + OT_WORD;
6221 modrm = ldub_code(s->pc++);
6222 reg = ((modrm >> 3) & 7) | rex_r;
6223 mod = (modrm >> 6) & 3;
6224 t0 = tcg_temp_local_new();
6225 if (mod != 3) {
6226 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6227 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
6228 } else {
6229 rm = (modrm & 7) | REX_B(s);
6230 gen_op_mov_v_reg(ot, t0, rm);
6232 #ifdef TARGET_X86_64
6233 if (ot == OT_LONG) {
6234 /* XXX: specific Intel behaviour ? */
6235 l1 = gen_new_label();
6236 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6237 tcg_gen_st32_tl(t0, cpu_env, offsetof(CPUState, regs[reg]) + REG_L_OFFSET);
6238 gen_set_label(l1);
6239 tcg_gen_movi_tl(cpu_tmp0, 0);
6240 tcg_gen_st32_tl(cpu_tmp0, cpu_env, offsetof(CPUState, regs[reg]) + REG_LH_OFFSET);
6241 } else
6242 #endif
6244 l1 = gen_new_label();
6245 gen_jcc1(s, s->cc_op, b ^ 1, l1);
6246 gen_op_mov_reg_v(ot, reg, t0);
6247 gen_set_label(l1);
6249 tcg_temp_free(t0);
6251 break;
6253 /************************/
6254 /* flags */
6255 case 0x9c: /* pushf */
6256 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PUSHF);
6257 if (s->vm86 && s->iopl != 3) {
6258 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6259 } else {
6260 if (s->cc_op != CC_OP_DYNAMIC)
6261 gen_op_set_cc_op(s->cc_op);
6262 gen_helper_read_eflags(cpu_T[0]);
6263 gen_push_T0(s);
6265 break;
6266 case 0x9d: /* popf */
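/* the set of EFLAGS bits popf may modify depends on privilege:
   CPL 0 may also change IOPL, CPL <= IOPL may also change IF, and
   otherwise neither; the masks passed to the helper below encode
   exactly this */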
6267 gen_svm_check_intercept(s, pc_start, SVM_EXIT_POPF);
6268 if (s->vm86 && s->iopl != 3) {
6269 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6270 } else {
6271 gen_pop_T0(s);
6272 if (s->cpl == 0) {
6273 if (s->dflag) {
6274 gen_helper_write_eflags(cpu_T[0],
6275 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK)));
6276 } else {
6277 gen_helper_write_eflags(cpu_T[0],
6278 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK | IOPL_MASK) & 0xffff));
6280 } else {
6281 if (s->cpl <= s->iopl) {
6282 if (s->dflag) {
6283 gen_helper_write_eflags(cpu_T[0],
6284 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK)));
6285 } else {
6286 gen_helper_write_eflags(cpu_T[0],
6287 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK | IF_MASK) & 0xffff));
6289 } else {
6290 if (s->dflag) {
6291 gen_helper_write_eflags(cpu_T[0],
6292 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK)));
6293 } else {
6294 gen_helper_write_eflags(cpu_T[0],
6295 tcg_const_i32((TF_MASK | AC_MASK | ID_MASK | NT_MASK) & 0xffff));
6299 gen_pop_update(s);
6300 s->cc_op = CC_OP_EFLAGS;
6301 /* abort translation because TF flag may change */
6302 gen_jmp_im(s->pc - s->cs_base);
6303 gen_eob(s);
6305 break;
6306 case 0x9e: /* sahf */
6307 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6308 goto illegal_op;
6309 gen_op_mov_TN_reg(OT_BYTE, 0, R_AH);
6310 if (s->cc_op != CC_OP_DYNAMIC)
6311 gen_op_set_cc_op(s->cc_op);
6312 gen_compute_eflags(cpu_cc_src);
6313 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, CC_O);
6314 tcg_gen_andi_tl(cpu_T[0], cpu_T[0], CC_S | CC_Z | CC_A | CC_P | CC_C);
6315 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, cpu_T[0]);
6316 s->cc_op = CC_OP_EFLAGS;
6317 break;
6318 case 0x9f: /* lahf */
6319 if (CODE64(s) && !(s->cpuid_ext3_features & CPUID_EXT3_LAHF_LM))
6320 goto illegal_op;
6321 if (s->cc_op != CC_OP_DYNAMIC)
6322 gen_op_set_cc_op(s->cc_op);
6323 gen_compute_eflags(cpu_T[0]);
6324 /* Note: gen_compute_eflags() only gives the condition codes */
6325 tcg_gen_ori_tl(cpu_T[0], cpu_T[0], 0x02);
6326 gen_op_mov_reg_T0(OT_BYTE, R_AH);
6327 break;
6328 case 0xf5: /* cmc */
6329 if (s->cc_op != CC_OP_DYNAMIC)
6330 gen_op_set_cc_op(s->cc_op);
6331 gen_compute_eflags(cpu_cc_src);
6332 tcg_gen_xori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6333 s->cc_op = CC_OP_EFLAGS;
6334 break;
6335 case 0xf8: /* clc */
6336 if (s->cc_op != CC_OP_DYNAMIC)
6337 gen_op_set_cc_op(s->cc_op);
6338 gen_compute_eflags(cpu_cc_src);
6339 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_C);
6340 s->cc_op = CC_OP_EFLAGS;
6341 break;
6342 case 0xf9: /* stc */
6343 if (s->cc_op != CC_OP_DYNAMIC)
6344 gen_op_set_cc_op(s->cc_op);
6345 gen_compute_eflags(cpu_cc_src);
6346 tcg_gen_ori_tl(cpu_cc_src, cpu_cc_src, CC_C);
6347 s->cc_op = CC_OP_EFLAGS;
6348 break;
6349 case 0xfc: /* cld */
6350 tcg_gen_movi_i32(cpu_tmp2_i32, 1);
6351 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6352 break;
6353 case 0xfd: /* std */
6354 tcg_gen_movi_i32(cpu_tmp2_i32, -1);
6355 tcg_gen_st_i32(cpu_tmp2_i32, cpu_env, offsetof(CPUState, df));
6356 break;
6358 /************************/
6359 /* bit operations */
6360 case 0x1ba: /* bt/bts/btr/btc Gv, im */
6361 ot = dflag + OT_WORD;
6362 modrm = ldub_code(s->pc++);
6363 op = (modrm >> 3) & 7;
6364 mod = (modrm >> 6) & 3;
6365 rm = (modrm & 7) | REX_B(s);
6366 if (mod != 3) {
6367 s->rip_offset = 1;
6368 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6369 gen_op_ld_T0_A0(ot + s->mem_index);
6370 } else {
6371 gen_op_mov_TN_reg(ot, 0, rm);
6372 }
6373 /* load shift */
6374 val = ldub_code(s->pc++);
6375 gen_op_movl_T1_im(val);
6376 if (op < 4)
6377 goto illegal_op;
6378 op -= 4;
6379 goto bt_op;
6380 case 0x1a3: /* bt Gv, Ev */
6381 op = 0;
6382 goto do_btx;
6383 case 0x1ab: /* bts */
6384 op = 1;
6385 goto do_btx;
6386 case 0x1b3: /* btr */
6387 op = 2;
6388 goto do_btx;
6389 case 0x1bb: /* btc */
6390 op = 3;
6391 do_btx:
6392 ot = dflag + OT_WORD;
6393 modrm = ldub_code(s->pc++);
6394 reg = ((modrm >> 3) & 7) | rex_r;
6395 mod = (modrm >> 6) & 3;
6396 rm = (modrm & 7) | REX_B(s);
6397 gen_op_mov_TN_reg(OT_LONG, 1, reg);
6398 if (mod != 3) {
6399 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6400 /* specific case: we need to add a displacement */
6401 gen_exts(ot, cpu_T[1]);
6402 tcg_gen_sari_tl(cpu_tmp0, cpu_T[1], 3 + ot);
6403 tcg_gen_shli_tl(cpu_tmp0, cpu_tmp0, ot);
6404 tcg_gen_add_tl(cpu_A0, cpu_A0, cpu_tmp0);
6405 gen_op_ld_T0_A0(ot + s->mem_index);
6406 } else {
6407 gen_op_mov_TN_reg(ot, 0, rm);
6408 }
6409 bt_op:
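/* shared tail for bt/bts/btr/btc: T1 holds the bit offset (mod operand
   width) and T0 the word containing the bit; the pre-update value is
   shifted down into bit 0 of CC_SRC so CC_OP_SARB can recover CF from it */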
6410 tcg_gen_andi_tl(cpu_T[1], cpu_T[1], (1 << (3 + ot)) - 1);
6411 switch(op) {
6412 case 0:
6413 tcg_gen_shr_tl(cpu_cc_src, cpu_T[0], cpu_T[1]);
6414 tcg_gen_movi_tl(cpu_cc_dst, 0);
6415 break;
6416 case 1:
6417 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6418 tcg_gen_movi_tl(cpu_tmp0, 1);
6419 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6420 tcg_gen_or_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6421 break;
6422 case 2:
6423 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6424 tcg_gen_movi_tl(cpu_tmp0, 1);
6425 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6426 tcg_gen_not_tl(cpu_tmp0, cpu_tmp0);
6427 tcg_gen_and_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6428 break;
6429 default:
6430 case 3:
6431 tcg_gen_shr_tl(cpu_tmp4, cpu_T[0], cpu_T[1]);
6432 tcg_gen_movi_tl(cpu_tmp0, 1);
6433 tcg_gen_shl_tl(cpu_tmp0, cpu_tmp0, cpu_T[1]);
6434 tcg_gen_xor_tl(cpu_T[0], cpu_T[0], cpu_tmp0);
6435 break;
6436 }
6437 s->cc_op = CC_OP_SARB + ot;
6438 if (op != 0) {
6439 if (mod != 3)
6440 gen_op_st_T0_A0(ot + s->mem_index);
6441 else
6442 gen_op_mov_reg_T0(ot, rm);
6443 tcg_gen_mov_tl(cpu_cc_src, cpu_tmp4);
6444 tcg_gen_movi_tl(cpu_cc_dst, 0);
6445 }
6446 break;
6447 case 0x1bc: /* bsf */
6448 case 0x1bd: /* bsr */
6449 {
6450 int label1;
6451 TCGv t0;
6453 ot = dflag + OT_WORD;
6454 modrm = ldub_code(s->pc++);
6455 reg = ((modrm >> 3) & 7) | rex_r;
6456 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
6457 gen_extu(ot, cpu_T[0]);
6458 label1 = gen_new_label();
6459 tcg_gen_movi_tl(cpu_cc_dst, 0);
6460 t0 = tcg_temp_local_new();
6461 tcg_gen_mov_tl(t0, cpu_T[0]);
6462 tcg_gen_brcondi_tl(TCG_COND_EQ, t0, 0, label1);
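/* a zero source branches past the write-back: cpu_cc_dst stays 0, so ZF
   reads as set and the destination register is left unmodified */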
6463 if (b & 1) {
6464 gen_helper_bsr(cpu_T[0], t0);
6465 } else {
6466 gen_helper_bsf(cpu_T[0], t0);
6467 }
6468 gen_op_mov_reg_T0(ot, reg);
6469 tcg_gen_movi_tl(cpu_cc_dst, 1);
6470 gen_set_label(label1);
6471 tcg_gen_discard_tl(cpu_cc_src);
6472 s->cc_op = CC_OP_LOGICB + ot;
6473 tcg_temp_free(t0);
6474 }
6475 break;
6476 /************************/
6477 /* bcd */
6478 case 0x27: /* daa */
6479 if (CODE64(s))
6480 goto illegal_op;
6481 if (s->cc_op != CC_OP_DYNAMIC)
6482 gen_op_set_cc_op(s->cc_op);
6483 gen_helper_daa();
6484 s->cc_op = CC_OP_EFLAGS;
6485 break;
6486 case 0x2f: /* das */
6487 if (CODE64(s))
6488 goto illegal_op;
6489 if (s->cc_op != CC_OP_DYNAMIC)
6490 gen_op_set_cc_op(s->cc_op);
6491 gen_helper_das();
6492 s->cc_op = CC_OP_EFLAGS;
6493 break;
6494 case 0x37: /* aaa */
6495 if (CODE64(s))
6496 goto illegal_op;
6497 if (s->cc_op != CC_OP_DYNAMIC)
6498 gen_op_set_cc_op(s->cc_op);
6499 gen_helper_aaa();
6500 s->cc_op = CC_OP_EFLAGS;
6501 break;
6502 case 0x3f: /* aas */
6503 if (CODE64(s))
6504 goto illegal_op;
6505 if (s->cc_op != CC_OP_DYNAMIC)
6506 gen_op_set_cc_op(s->cc_op);
6507 gen_helper_aas();
6508 s->cc_op = CC_OP_EFLAGS;
6509 break;
6510 case 0xd4: /* aam */
6511 if (CODE64(s))
6512 goto illegal_op;
6513 val = ldub_code(s->pc++);
6514 if (val == 0) {
6515 gen_exception(s, EXCP00_DIVZ, pc_start - s->cs_base);
6516 } else {
6517 gen_helper_aam(tcg_const_i32(val));
6518 s->cc_op = CC_OP_LOGICB;
6519 }
6520 break;
6521 case 0xd5: /* aad */
6522 if (CODE64(s))
6523 goto illegal_op;
6524 val = ldub_code(s->pc++);
6525 gen_helper_aad(tcg_const_i32(val));
6526 s->cc_op = CC_OP_LOGICB;
6527 break;
6528 /************************/
6529 /* misc */
6530 case 0x90: /* nop */
6531 /* XXX: xchg + rex handling */
6532 /* XXX: correct lock test for all insn */
6533 if (prefixes & PREFIX_LOCK)
6534 goto illegal_op;
6535 if (prefixes & PREFIX_REPZ) {
6536 gen_svm_check_intercept(s, pc_start, SVM_EXIT_PAUSE);
6537 }
6538 break;
6539 case 0x9b: /* fwait */
6540 if ((s->flags & (HF_MP_MASK | HF_TS_MASK)) ==
6541 (HF_MP_MASK | HF_TS_MASK)) {
6542 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
6543 } else {
6544 if (s->cc_op != CC_OP_DYNAMIC)
6545 gen_op_set_cc_op(s->cc_op);
6546 gen_jmp_im(pc_start - s->cs_base);
6547 gen_helper_fwait();
6548 }
6549 break;
6550 case 0xcc: /* int3 */
6551 gen_interrupt(s, EXCP03_INT3, pc_start - s->cs_base, s->pc - s->cs_base);
6552 break;
6553 case 0xcd: /* int N */
6554 val = ldub_code(s->pc++);
6555 if (s->vm86 && s->iopl != 3) {
6556 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6557 } else {
6558 gen_interrupt(s, val, pc_start - s->cs_base, s->pc - s->cs_base);
6559 }
6560 break;
6561 case 0xce: /* into */
6562 if (CODE64(s))
6563 goto illegal_op;
6564 if (s->cc_op != CC_OP_DYNAMIC)
6565 gen_op_set_cc_op(s->cc_op);
6566 gen_jmp_im(pc_start - s->cs_base);
6567 gen_helper_into(tcg_const_i32(s->pc - pc_start));
6568 break;
6569 #ifdef WANT_ICEBP
6570 case 0xf1: /* icebp (undocumented, exits to external debugger) */
6571 gen_svm_check_intercept(s, pc_start, SVM_EXIT_ICEBP);
6572 #if 1
6573 gen_debug(s, pc_start - s->cs_base);
6574 #else
6575 /* start debug */
6576 tb_flush(cpu_single_env);
6577 cpu_set_log(CPU_LOG_INT | CPU_LOG_TB_IN_ASM);
6578 #endif
6579 break;
6580 #endif
6581 case 0xfa: /* cli */
6582 if (!s->vm86) {
6583 if (s->cpl <= s->iopl) {
6584 gen_helper_cli();
6585 } else {
6586 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6587 }
6588 } else {
6589 if (s->iopl == 3) {
6590 gen_helper_cli();
6591 } else {
6592 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6593 }
6594 }
6595 break;
6596 case 0xfb: /* sti */
6597 if (!s->vm86) {
6598 if (s->cpl <= s->iopl) {
6599 gen_sti:
6600 gen_helper_sti();
6601 /* interrupts are enabled only after the first insn following sti */
6602 /* If several consecutive instructions inhibit interrupts, only the
6603 _first_ one sets the inhibit flag */
6604 if (!(s->tb->flags & HF_INHIBIT_IRQ_MASK))
6605 gen_helper_set_inhibit_irq();
6606 /* give a chance to handle pending irqs */
6607 gen_jmp_im(s->pc - s->cs_base);
6608 gen_eob(s);
6609 } else {
6610 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6611 }
6612 } else {
6613 if (s->iopl == 3) {
6614 goto gen_sti;
6615 } else {
6616 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6617 }
6618 }
6619 break;
6620 case 0x62: /* bound */
6621 if (CODE64(s))
6622 goto illegal_op;
6623 ot = dflag ? OT_LONG : OT_WORD;
6624 modrm = ldub_code(s->pc++);
6625 reg = (modrm >> 3) & 7;
6626 mod = (modrm >> 6) & 3;
6627 if (mod == 3)
6628 goto illegal_op;
6629 gen_op_mov_TN_reg(ot, 0, reg);
6630 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6631 gen_jmp_im(pc_start - s->cs_base);
6632 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6633 if (ot == OT_WORD)
6634 gen_helper_boundw(cpu_A0, cpu_tmp2_i32);
6635 else
6636 gen_helper_boundl(cpu_A0, cpu_tmp2_i32);
6637 break;
6638 case 0x1c8 ... 0x1cf: /* bswap reg */
6639 reg = (b & 7) | REX_B(s);
6640 #ifdef TARGET_X86_64
6641 if (dflag == 2) {
6642 gen_op_mov_TN_reg(OT_QUAD, 0, reg);
6643 tcg_gen_bswap_i64(cpu_T[0], cpu_T[0]);
6644 gen_op_mov_reg_T0(OT_QUAD, reg);
6645 } else
6646 {
6647 TCGv_i32 tmp0;
6648 gen_op_mov_TN_reg(OT_LONG, 0, reg);
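/* on 64-bit targets cpu_T[0] is 64 bits wide, so truncate to 32 bits,
   byte-swap, and zero-extend back before the write-back */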
6650 tmp0 = tcg_temp_new_i32();
6651 tcg_gen_trunc_i64_i32(tmp0, cpu_T[0]);
6652 tcg_gen_bswap_i32(tmp0, tmp0);
6653 tcg_gen_extu_i32_i64(cpu_T[0], tmp0);
6654 gen_op_mov_reg_T0(OT_LONG, reg);
6655 }
6656 #else
6657 {
6658 gen_op_mov_TN_reg(OT_LONG, 0, reg);
6659 tcg_gen_bswap_i32(cpu_T[0], cpu_T[0]);
6660 gen_op_mov_reg_T0(OT_LONG, reg);
6661 }
6662 #endif
6663 break;
6664 case 0xd6: /* salc */
6665 if (CODE64(s))
6666 goto illegal_op;
6667 if (s->cc_op != CC_OP_DYNAMIC)
6668 gen_op_set_cc_op(s->cc_op);
6669 gen_compute_eflags_c(cpu_T[0]);
6670 tcg_gen_neg_tl(cpu_T[0], cpu_T[0]);
6671 gen_op_mov_reg_T0(OT_BYTE, R_EAX);
6672 break;
6673 case 0xe0: /* loopnz */
6674 case 0xe1: /* loopz */
6675 case 0xe2: /* loop */
6676 case 0xe3: /* jecxz */
6677 {
6678 int l1, l2, l3;
6680 tval = (int8_t)insn_get(s, OT_BYTE);
6681 next_eip = s->pc - s->cs_base;
6682 tval += next_eip;
6683 if (s->dflag == 0)
6684 tval &= 0xffff;
6686 l1 = gen_new_label();
6687 l2 = gen_new_label();
6688 l3 = gen_new_label();
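/* label usage: l1 = branch taken (eip = tval), l3 = branch not taken
   (eip = next_eip), l2 = common exit where the TB ends */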
6689 b &= 3;
6690 switch(b) {
6691 case 0: /* loopnz */
6692 case 1: /* loopz */
6693 if (s->cc_op != CC_OP_DYNAMIC)
6694 gen_op_set_cc_op(s->cc_op);
6695 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6696 gen_op_jz_ecx(s->aflag, l3);
6697 gen_compute_eflags(cpu_tmp0);
6698 tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, CC_Z);
6699 if (b == 0) {
6700 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, l1);
6701 } else {
6702 tcg_gen_brcondi_tl(TCG_COND_NE, cpu_tmp0, 0, l1);
6703 }
6704 break;
6705 case 2: /* loop */
6706 gen_op_add_reg_im(s->aflag, R_ECX, -1);
6707 gen_op_jnz_ecx(s->aflag, l1);
6708 break;
6709 default:
6710 case 3: /* jcxz */
6711 gen_op_jz_ecx(s->aflag, l1);
6712 break;
6713 }
6715 gen_set_label(l3);
6716 gen_jmp_im(next_eip);
6717 tcg_gen_br(l2);
6719 gen_set_label(l1);
6720 gen_jmp_im(tval);
6721 gen_set_label(l2);
6722 gen_eob(s);
6723 }
6724 break;
6725 case 0x130: /* wrmsr */
6726 case 0x132: /* rdmsr */
6727 if (s->cpl != 0) {
6728 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6729 } else {
6730 if (s->cc_op != CC_OP_DYNAMIC)
6731 gen_op_set_cc_op(s->cc_op);
6732 gen_jmp_im(pc_start - s->cs_base);
6733 if (b & 2) {
6734 gen_helper_rdmsr();
6735 } else {
6736 gen_helper_wrmsr();
6737 }
6738 }
6739 break;
6740 case 0x131: /* rdtsc */
6741 if (s->cc_op != CC_OP_DYNAMIC)
6742 gen_op_set_cc_op(s->cc_op);
6743 gen_jmp_im(pc_start - s->cs_base);
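/* with icount, rdtsc counts as an I/O operation: it is bracketed by
   gen_io_start()/gen_io_end() and the TB ends so the insn count stays exact */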
6744 if (use_icount)
6745 gen_io_start();
6746 gen_helper_rdtsc();
6747 if (use_icount) {
6748 gen_io_end();
6749 gen_jmp(s, s->pc - s->cs_base);
6750 }
6751 break;
6752 case 0x133: /* rdpmc */
6753 if (s->cc_op != CC_OP_DYNAMIC)
6754 gen_op_set_cc_op(s->cc_op);
6755 gen_jmp_im(pc_start - s->cs_base);
6756 gen_helper_rdpmc();
6757 break;
6758 case 0x134: /* sysenter */
6759 /* On Intel CPUs, SYSENTER is also valid in 64-bit mode */
6760 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6761 goto illegal_op;
6762 if (!s->pe) {
6763 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6764 } else {
6765 if (s->cc_op != CC_OP_DYNAMIC) {
6766 gen_op_set_cc_op(s->cc_op);
6767 s->cc_op = CC_OP_DYNAMIC;
6768 }
6769 gen_jmp_im(pc_start - s->cs_base);
6770 gen_helper_sysenter();
6771 gen_eob(s);
6772 }
6773 break;
6774 case 0x135: /* sysexit */
6775 /* On Intel CPUs, SYSEXIT is also valid in 64-bit mode */
6776 if (CODE64(s) && cpu_single_env->cpuid_vendor1 != CPUID_VENDOR_INTEL_1)
6777 goto illegal_op;
6778 if (!s->pe) {
6779 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6780 } else {
6781 if (s->cc_op != CC_OP_DYNAMIC) {
6782 gen_op_set_cc_op(s->cc_op);
6783 s->cc_op = CC_OP_DYNAMIC;
6784 }
6785 gen_jmp_im(pc_start - s->cs_base);
6786 gen_helper_sysexit(tcg_const_i32(dflag));
6787 gen_eob(s);
6788 }
6789 break;
6790 #ifdef TARGET_X86_64
6791 case 0x105: /* syscall */
6792 /* XXX: is it usable in real mode ? */
6793 if (s->cc_op != CC_OP_DYNAMIC) {
6794 gen_op_set_cc_op(s->cc_op);
6795 s->cc_op = CC_OP_DYNAMIC;
6796 }
6797 gen_jmp_im(pc_start - s->cs_base);
6798 gen_helper_syscall(tcg_const_i32(s->pc - pc_start));
6799 gen_eob(s);
6800 break;
6801 case 0x107: /* sysret */
6802 if (!s->pe) {
6803 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6804 } else {
6805 if (s->cc_op != CC_OP_DYNAMIC) {
6806 gen_op_set_cc_op(s->cc_op);
6807 s->cc_op = CC_OP_DYNAMIC;
6808 }
6809 gen_jmp_im(pc_start - s->cs_base);
6810 gen_helper_sysret(tcg_const_i32(s->dflag));
6811 /* condition codes are modified only in long mode */
6812 if (s->lma)
6813 s->cc_op = CC_OP_EFLAGS;
6814 gen_eob(s);
6815 }
6816 break;
6817 #endif
6818 case 0x1a2: /* cpuid */
6819 if (s->cc_op != CC_OP_DYNAMIC)
6820 gen_op_set_cc_op(s->cc_op);
6821 gen_jmp_im(pc_start - s->cs_base);
6822 gen_helper_cpuid();
6823 break;
6824 case 0xf4: /* hlt */
6825 if (s->cpl != 0) {
6826 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6827 } else {
6828 if (s->cc_op != CC_OP_DYNAMIC)
6829 gen_op_set_cc_op(s->cc_op);
6830 gen_jmp_im(pc_start - s->cs_base);
6831 gen_helper_hlt(tcg_const_i32(s->pc - pc_start));
6832 s->is_jmp = 3;
6833 }
6834 break;
6835 case 0x100:
6836 modrm = ldub_code(s->pc++);
6837 mod = (modrm >> 6) & 3;
6838 op = (modrm >> 3) & 7;
6839 switch(op) {
6840 case 0: /* sldt */
6841 if (!s->pe || s->vm86)
6842 goto illegal_op;
6843 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_READ);
6844 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,ldt.selector));
6845 ot = OT_WORD;
6846 if (mod == 3)
6847 ot += s->dflag;
6848 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6849 break;
6850 case 2: /* lldt */
6851 if (!s->pe || s->vm86)
6852 goto illegal_op;
6853 if (s->cpl != 0) {
6854 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6855 } else {
6856 gen_svm_check_intercept(s, pc_start, SVM_EXIT_LDTR_WRITE);
6857 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6858 gen_jmp_im(pc_start - s->cs_base);
6859 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6860 gen_helper_lldt(cpu_tmp2_i32);
6861 }
6862 break;
6863 case 1: /* str */
6864 if (!s->pe || s->vm86)
6865 goto illegal_op;
6866 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_READ);
6867 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,tr.selector));
6868 ot = OT_WORD;
6869 if (mod == 3)
6870 ot += s->dflag;
6871 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 1);
6872 break;
6873 case 3: /* ltr */
6874 if (!s->pe || s->vm86)
6875 goto illegal_op;
6876 if (s->cpl != 0) {
6877 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6878 } else {
6879 gen_svm_check_intercept(s, pc_start, SVM_EXIT_TR_WRITE);
6880 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6881 gen_jmp_im(pc_start - s->cs_base);
6882 tcg_gen_trunc_tl_i32(cpu_tmp2_i32, cpu_T[0]);
6883 gen_helper_ltr(cpu_tmp2_i32);
6884 }
6885 break;
6886 case 4: /* verr */
6887 case 5: /* verw */
6888 if (!s->pe || s->vm86)
6889 goto illegal_op;
6890 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
6891 if (s->cc_op != CC_OP_DYNAMIC)
6892 gen_op_set_cc_op(s->cc_op);
6893 if (op == 4)
6894 gen_helper_verr(cpu_T[0]);
6895 else
6896 gen_helper_verw(cpu_T[0]);
6897 s->cc_op = CC_OP_EFLAGS;
6898 break;
6899 default:
6900 goto illegal_op;
6901 }
6902 break;
6903 case 0x101:
6904 modrm = ldub_code(s->pc++);
6905 mod = (modrm >> 6) & 3;
6906 op = (modrm >> 3) & 7;
6907 rm = modrm & 7;
6908 switch(op) {
6909 case 0: /* sgdt */
6910 if (mod == 3)
6911 goto illegal_op;
6912 gen_svm_check_intercept(s, pc_start, SVM_EXIT_GDTR_READ);
6913 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6914 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.limit));
6915 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6916 gen_add_A0_im(s, 2);
6917 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, gdt.base));
6918 if (!s->dflag)
6919 gen_op_andl_T0_im(0xffffff);
6920 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6921 break;
6922 case 1:
6923 if (mod == 3) {
6924 switch (rm) {
6925 case 0: /* monitor */
6926 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6927 s->cpl != 0)
6928 goto illegal_op;
6929 if (s->cc_op != CC_OP_DYNAMIC)
6930 gen_op_set_cc_op(s->cc_op);
6931 gen_jmp_im(pc_start - s->cs_base);
6932 #ifdef TARGET_X86_64
6933 if (s->aflag == 2) {
6934 gen_op_movq_A0_reg(R_EAX);
6935 } else
6936 #endif
6937 {
6938 gen_op_movl_A0_reg(R_EAX);
6939 if (s->aflag == 0)
6940 gen_op_andl_A0_ffff();
6941 }
6942 gen_add_A0_ds_seg(s);
6943 gen_helper_monitor(cpu_A0);
6944 break;
6945 case 1: /* mwait */
6946 if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) ||
6947 s->cpl != 0)
6948 goto illegal_op;
6949 if (s->cc_op != CC_OP_DYNAMIC) {
6950 gen_op_set_cc_op(s->cc_op);
6951 s->cc_op = CC_OP_DYNAMIC;
6952 }
6953 gen_jmp_im(pc_start - s->cs_base);
6954 gen_helper_mwait(tcg_const_i32(s->pc - pc_start));
6955 gen_eob(s);
6956 break;
6957 default:
6958 goto illegal_op;
6959 }
6960 } else { /* sidt */
6961 gen_svm_check_intercept(s, pc_start, SVM_EXIT_IDTR_READ);
6962 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
6963 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.limit));
6964 gen_op_st_T0_A0(OT_WORD + s->mem_index);
6965 gen_add_A0_im(s, 2);
6966 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, idt.base));
6967 if (!s->dflag)
6968 gen_op_andl_T0_im(0xffffff);
6969 gen_op_st_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
6970 }
6971 break;
6972 case 2: /* lgdt */
6973 case 3: /* lidt */
6974 if (mod == 3) {
6975 if (s->cc_op != CC_OP_DYNAMIC)
6976 gen_op_set_cc_op(s->cc_op);
6977 gen_jmp_im(pc_start - s->cs_base);
6978 switch(rm) {
6979 case 0: /* VMRUN */
6980 if (!(s->flags & HF_SVME_MASK) || !s->pe)
6981 goto illegal_op;
6982 if (s->cpl != 0) {
6983 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
6984 break;
6985 } else {
6986 gen_helper_vmrun(tcg_const_i32(s->aflag),
6987 tcg_const_i32(s->pc - pc_start));
6988 tcg_gen_exit_tb(0);
6989 s->is_jmp = 3;
6990 }
6991 break;
6992 case 1: /* VMMCALL */
6993 if (!(s->flags & HF_SVME_MASK))
6994 goto illegal_op;
6995 gen_helper_vmmcall();
6996 break;
6997 case 2: /* VMLOAD */
6998 if (!(s->flags & HF_SVME_MASK) || !s->pe)
6999 goto illegal_op;
7000 if (s->cpl != 0) {
7001 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7002 break;
7003 } else {
7004 gen_helper_vmload(tcg_const_i32(s->aflag));
7005 }
7006 break;
7007 case 3: /* VMSAVE */
7008 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7009 goto illegal_op;
7010 if (s->cpl != 0) {
7011 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7012 break;
7013 } else {
7014 gen_helper_vmsave(tcg_const_i32(s->aflag));
7015 }
7016 break;
7017 case 4: /* STGI */
7018 if ((!(s->flags & HF_SVME_MASK) &&
7019 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7020 !s->pe)
7021 goto illegal_op;
7022 if (s->cpl != 0) {
7023 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7024 break;
7025 } else {
7026 gen_helper_stgi();
7027 }
7028 break;
7029 case 5: /* CLGI */
7030 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7031 goto illegal_op;
7032 if (s->cpl != 0) {
7033 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7034 break;
7035 } else {
7036 gen_helper_clgi();
7037 }
7038 break;
7039 case 6: /* SKINIT */
7040 if ((!(s->flags & HF_SVME_MASK) &&
7041 !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT)) ||
7042 !s->pe)
7043 goto illegal_op;
7044 gen_helper_skinit();
7045 break;
7046 case 7: /* INVLPGA */
7047 if (!(s->flags & HF_SVME_MASK) || !s->pe)
7048 goto illegal_op;
7049 if (s->cpl != 0) {
7050 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7051 break;
7052 } else {
7053 gen_helper_invlpga(tcg_const_i32(s->aflag));
7054 }
7055 break;
7056 default:
7057 goto illegal_op;
7058 }
7059 } else if (s->cpl != 0) {
7060 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7061 } else {
7062 gen_svm_check_intercept(s, pc_start,
7063 op==2 ? SVM_EXIT_GDTR_WRITE : SVM_EXIT_IDTR_WRITE);
7064 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7065 gen_op_ld_T1_A0(OT_WORD + s->mem_index);
7066 gen_add_A0_im(s, 2);
7067 gen_op_ld_T0_A0(CODE64(s) + OT_LONG + s->mem_index);
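/* with a 16-bit operand size only 24 bits of the descriptor table base are
   used, hence the 0xffffff mask below */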
7068 if (!s->dflag)
7069 gen_op_andl_T0_im(0xffffff);
7070 if (op == 2) {
7071 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,gdt.base));
7072 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,gdt.limit));
7073 } else {
7074 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,idt.base));
7075 tcg_gen_st32_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,idt.limit));
7076 }
7077 }
7078 break;
7079 case 4: /* smsw */
7080 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_CR0);
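/* smsw returns the low 32 bits of CR0; on a big-endian host the low half
   of the 64-bit cr[0] field sits at offset +4 */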
7081 #if defined TARGET_X86_64 && defined WORDS_BIGENDIAN
7082 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]) + 4);
7083 #else
7084 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,cr[0]));
7085 #endif
7086 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 1);
7087 break;
7088 case 6: /* lmsw */
7089 if (s->cpl != 0) {
7090 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7091 } else {
7092 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7093 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7094 gen_helper_lmsw(cpu_T[0]);
7095 gen_jmp_im(s->pc - s->cs_base);
7096 gen_eob(s);
7097 }
7098 break;
7099 case 7: /* invlpg */
7100 if (s->cpl != 0) {
7101 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7102 } else {
7103 if (mod == 3) {
7104 #ifdef TARGET_X86_64
7105 if (CODE64(s) && rm == 0) {
7106 /* swapgs */
7107 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
7108 tcg_gen_ld_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,kernelgsbase));
7109 tcg_gen_st_tl(cpu_T[1], cpu_env, offsetof(CPUX86State,segs[R_GS].base));
7110 tcg_gen_st_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,kernelgsbase));
7111 } else
7112 #endif
7113 {
7114 goto illegal_op;
7115 }
7116 } else {
7117 if (s->cc_op != CC_OP_DYNAMIC)
7118 gen_op_set_cc_op(s->cc_op);
7119 gen_jmp_im(pc_start - s->cs_base);
7120 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7121 gen_helper_invlpg(cpu_A0);
7122 gen_jmp_im(s->pc - s->cs_base);
7123 gen_eob(s);
7124 }
7125 }
7126 break;
7127 default:
7128 goto illegal_op;
7129 }
7130 break;
7131 case 0x108: /* invd */
7132 case 0x109: /* wbinvd */
7133 if (s->cpl != 0) {
7134 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7135 } else {
7136 gen_svm_check_intercept(s, pc_start, (b & 2) ? SVM_EXIT_INVD : SVM_EXIT_WBINVD);
7137 /* nothing to do */
7138 }
7139 break;
7140 case 0x63: /* arpl, or movsxd in x86_64 code */
7141 #ifdef TARGET_X86_64
7142 if (CODE64(s)) {
7143 int d_ot;
7144 /* d_ot is the size of destination */
7145 d_ot = dflag + OT_WORD;
7147 modrm = ldub_code(s->pc++);
7148 reg = ((modrm >> 3) & 7) | rex_r;
7149 mod = (modrm >> 6) & 3;
7150 rm = (modrm & 7) | REX_B(s);
7152 if (mod == 3) {
7153 gen_op_mov_TN_reg(OT_LONG, 0, rm);
7154 /* sign extend */
7155 if (d_ot == OT_QUAD)
7156 tcg_gen_ext32s_tl(cpu_T[0], cpu_T[0]);
7157 gen_op_mov_reg_T0(d_ot, reg);
7158 } else {
7159 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7160 if (d_ot == OT_QUAD) {
7161 gen_op_lds_T0_A0(OT_LONG + s->mem_index);
7162 } else {
7163 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7164 }
7165 gen_op_mov_reg_T0(d_ot, reg);
7166 }
7167 } else
7168 #endif
7169 {
7170 int label1;
7171 TCGv t0, t1, t2;
7173 if (!s->pe || s->vm86)
7174 goto illegal_op;
7175 t0 = tcg_temp_local_new();
7176 t1 = tcg_temp_local_new();
7177 t2 = tcg_temp_local_new();
7178 ot = OT_WORD;
7179 modrm = ldub_code(s->pc++);
7180 reg = (modrm >> 3) & 7;
7181 mod = (modrm >> 6) & 3;
7182 rm = modrm & 7;
7183 if (mod != 3) {
7184 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7185 gen_op_ld_v(ot + s->mem_index, t0, cpu_A0);
7186 } else {
7187 gen_op_mov_v_reg(ot, t0, rm);
7188 }
7189 gen_op_mov_v_reg(ot, t1, reg);
7190 tcg_gen_andi_tl(cpu_tmp0, t0, 3);
7191 tcg_gen_andi_tl(t1, t1, 3);
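/* arpl: cpu_tmp0 and t1 now hold the RPL fields (bits 1:0) of the
   destination and source selectors; if dest RPL < src RPL it is raised to
   src RPL and ZF is recorded in t2, merged into cc_src below */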
7192 tcg_gen_movi_tl(t2, 0);
7193 label1 = gen_new_label();
7194 tcg_gen_brcond_tl(TCG_COND_GE, cpu_tmp0, t1, label1);
7195 tcg_gen_andi_tl(t0, t0, ~3);
7196 tcg_gen_or_tl(t0, t0, t1);
7197 tcg_gen_movi_tl(t2, CC_Z);
7198 gen_set_label(label1);
7199 if (mod != 3) {
7200 gen_op_st_v(ot + s->mem_index, t0, cpu_A0);
7201 } else {
7202 gen_op_mov_reg_v(ot, rm, t0);
7203 }
7204 if (s->cc_op != CC_OP_DYNAMIC)
7205 gen_op_set_cc_op(s->cc_op);
7206 gen_compute_eflags(cpu_cc_src);
7207 tcg_gen_andi_tl(cpu_cc_src, cpu_cc_src, ~CC_Z);
7208 tcg_gen_or_tl(cpu_cc_src, cpu_cc_src, t2);
7209 s->cc_op = CC_OP_EFLAGS;
7210 tcg_temp_free(t0);
7211 tcg_temp_free(t1);
7212 tcg_temp_free(t2);
7213 }
7214 break;
7215 case 0x102: /* lar */
7216 case 0x103: /* lsl */
7217 {
7218 int label1;
7219 TCGv t0;
7220 if (!s->pe || s->vm86)
7221 goto illegal_op;
7222 ot = dflag ? OT_LONG : OT_WORD;
7223 modrm = ldub_code(s->pc++);
7224 reg = ((modrm >> 3) & 7) | rex_r;
7225 gen_ldst_modrm(s, modrm, OT_WORD, OR_TMP0, 0);
7226 t0 = tcg_temp_local_new();
7227 if (s->cc_op != CC_OP_DYNAMIC)
7228 gen_op_set_cc_op(s->cc_op);
7229 if (b == 0x102)
7230 gen_helper_lar(t0, cpu_T[0]);
7231 else
7232 gen_helper_lsl(t0, cpu_T[0]);
7233 tcg_gen_andi_tl(cpu_tmp0, cpu_cc_src, CC_Z);
7234 label1 = gen_new_label();
7235 tcg_gen_brcondi_tl(TCG_COND_EQ, cpu_tmp0, 0, label1);
7236 gen_op_mov_reg_v(ot, reg, t0);
7237 gen_set_label(label1);
7238 s->cc_op = CC_OP_EFLAGS;
7239 tcg_temp_free(t0);
7240 }
7241 break;
7242 case 0x118:
7243 modrm = ldub_code(s->pc++);
7244 mod = (modrm >> 6) & 3;
7245 op = (modrm >> 3) & 7;
7246 switch(op) {
7247 case 0: /* prefetchnta */
7248 case 1: /* prefetcht0 */
7249 case 2: /* prefetcht1 */
7250 case 3: /* prefetcht2 */
7251 if (mod == 3)
7252 goto illegal_op;
7253 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7254 /* nothing more to do */
7255 break;
7256 default: /* nop (multi byte) */
7257 gen_nop_modrm(s, modrm);
7258 break;
7259 }
7260 break;
7261 case 0x119 ... 0x11f: /* nop (multi byte) */
7262 modrm = ldub_code(s->pc++);
7263 gen_nop_modrm(s, modrm);
7264 break;
7265 case 0x120: /* mov reg, crN */
7266 case 0x122: /* mov crN, reg */
7267 if (s->cpl != 0) {
7268 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7269 } else {
7270 modrm = ldub_code(s->pc++);
7271 if ((modrm & 0xc0) != 0xc0)
7272 goto illegal_op;
7273 rm = (modrm & 7) | REX_B(s);
7274 reg = ((modrm >> 3) & 7) | rex_r;
7275 if (CODE64(s))
7276 ot = OT_QUAD;
7277 else
7278 ot = OT_LONG;
7279 switch(reg) {
7280 case 0:
7281 case 2:
7282 case 3:
7283 case 4:
7284 case 8:
7285 if (s->cc_op != CC_OP_DYNAMIC)
7286 gen_op_set_cc_op(s->cc_op);
7287 gen_jmp_im(pc_start - s->cs_base);
7288 if (b & 2) {
7289 gen_op_mov_TN_reg(ot, 0, rm);
7290 gen_helper_write_crN(tcg_const_i32(reg), cpu_T[0]);
7291 gen_jmp_im(s->pc - s->cs_base);
7292 gen_eob(s);
7293 } else {
7294 gen_helper_read_crN(cpu_T[0], tcg_const_i32(reg));
7295 gen_op_mov_reg_T0(ot, rm);
7296 }
7297 break;
7298 default:
7299 goto illegal_op;
7300 }
7301 }
7302 break;
7303 case 0x121: /* mov reg, drN */
7304 case 0x123: /* mov drN, reg */
7305 if (s->cpl != 0) {
7306 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7307 } else {
7308 modrm = ldub_code(s->pc++);
7309 if ((modrm & 0xc0) != 0xc0)
7310 goto illegal_op;
7311 rm = (modrm & 7) | REX_B(s);
7312 reg = ((modrm >> 3) & 7) | rex_r;
7313 if (CODE64(s))
7314 ot = OT_QUAD;
7315 else
7316 ot = OT_LONG;
7317 /* XXX: do it dynamically with CR4.DE bit */
7318 if (reg == 4 || reg == 5 || reg >= 8)
7319 goto illegal_op;
7320 if (b & 2) {
7321 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_DR0 + reg);
7322 gen_op_mov_TN_reg(ot, 0, rm);
7323 gen_helper_movl_drN_T0(tcg_const_i32(reg), cpu_T[0]);
7324 gen_jmp_im(s->pc - s->cs_base);
7325 gen_eob(s);
7326 } else {
7327 gen_svm_check_intercept(s, pc_start, SVM_EXIT_READ_DR0 + reg);
7328 tcg_gen_ld_tl(cpu_T[0], cpu_env, offsetof(CPUX86State,dr[reg]));
7329 gen_op_mov_reg_T0(ot, rm);
7330 }
7331 }
7332 break;
7333 case 0x106: /* clts */
7334 if (s->cpl != 0) {
7335 gen_exception(s, EXCP0D_GPF, pc_start - s->cs_base);
7336 } else {
7337 gen_svm_check_intercept(s, pc_start, SVM_EXIT_WRITE_CR0);
7338 gen_helper_clts();
7339 /* abort block because static cpu state changed */
7340 gen_jmp_im(s->pc - s->cs_base);
7341 gen_eob(s);
7342 }
7343 break;
7344 /* MMX/3DNow!/SSE/SSE2/SSE3/SSSE3/SSE4 support */
7345 case 0x1c3: /* MOVNTI reg, mem */
7346 if (!(s->cpuid_features & CPUID_SSE2))
7347 goto illegal_op;
7348 ot = s->dflag == 2 ? OT_QUAD : OT_LONG;
7349 modrm = ldub_code(s->pc++);
7350 mod = (modrm >> 6) & 3;
7351 if (mod == 3)
7352 goto illegal_op;
7353 reg = ((modrm >> 3) & 7) | rex_r;
7354 /* generate a generic store */
7355 gen_ldst_modrm(s, modrm, ot, reg, 1);
7356 break;
7357 case 0x1ae:
7358 modrm = ldub_code(s->pc++);
7359 mod = (modrm >> 6) & 3;
7360 op = (modrm >> 3) & 7;
7361 switch(op) {
7362 case 0: /* fxsave */
7363 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7364 (s->flags & HF_EM_MASK))
7365 goto illegal_op;
7366 if (s->flags & HF_TS_MASK) {
7367 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7368 break;
7369 }
7370 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7371 if (s->cc_op != CC_OP_DYNAMIC)
7372 gen_op_set_cc_op(s->cc_op);
7373 gen_jmp_im(pc_start - s->cs_base);
7374 gen_helper_fxsave(cpu_A0, tcg_const_i32((s->dflag == 2)));
7375 break;
7376 case 1: /* fxrstor */
7377 if (mod == 3 || !(s->cpuid_features & CPUID_FXSR) ||
7378 (s->flags & HF_EM_MASK))
7379 goto illegal_op;
7380 if (s->flags & HF_TS_MASK) {
7381 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7382 break;
7383 }
7384 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7385 if (s->cc_op != CC_OP_DYNAMIC)
7386 gen_op_set_cc_op(s->cc_op);
7387 gen_jmp_im(pc_start - s->cs_base);
7388 gen_helper_fxrstor(cpu_A0, tcg_const_i32((s->dflag == 2)));
7389 break;
7390 case 2: /* ldmxcsr */
7391 case 3: /* stmxcsr */
7392 if (s->flags & HF_TS_MASK) {
7393 gen_exception(s, EXCP07_PREX, pc_start - s->cs_base);
7394 break;
7395 }
7396 if ((s->flags & HF_EM_MASK) || !(s->flags & HF_OSFXSR_MASK) ||
7397 mod == 3)
7398 goto illegal_op;
7399 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7400 if (op == 2) {
7401 gen_op_ld_T0_A0(OT_LONG + s->mem_index);
7402 tcg_gen_st32_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7403 } else {
7404 tcg_gen_ld32u_tl(cpu_T[0], cpu_env, offsetof(CPUX86State, mxcsr));
7405 gen_op_st_T0_A0(OT_LONG + s->mem_index);
7406 }
7407 break;
7408 case 5: /* lfence */
7409 case 6: /* mfence */
7410 if ((modrm & 0xc7) != 0xc0 || !(s->cpuid_features & CPUID_SSE))
7411 goto illegal_op;
7412 break;
7413 case 7: /* sfence / clflush */
7414 if ((modrm & 0xc7) == 0xc0) {
7415 /* sfence */
7416 /* XXX: also check for cpuid_ext2_features & CPUID_EXT2_EMMX */
7417 if (!(s->cpuid_features & CPUID_SSE))
7418 goto illegal_op;
7419 } else {
7420 /* clflush */
7421 if (!(s->cpuid_features & CPUID_CLFLUSH))
7422 goto illegal_op;
7423 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7424 }
7425 break;
7426 default:
7427 goto illegal_op;
7428 }
7429 break;
7430 case 0x10d: /* 3DNow! prefetch(w) */
7431 modrm = ldub_code(s->pc++);
7432 mod = (modrm >> 6) & 3;
7433 if (mod == 3)
7434 goto illegal_op;
7435 gen_lea_modrm(s, modrm, &reg_addr, &offset_addr);
7436 /* ignore for now */
7437 break;
7438 case 0x1aa: /* rsm */
7439 gen_svm_check_intercept(s, pc_start, SVM_EXIT_RSM);
7440 if (!(s->flags & HF_SMM_MASK))
7441 goto illegal_op;
7442 if (s->cc_op != CC_OP_DYNAMIC) {
7443 gen_op_set_cc_op(s->cc_op);
7444 s->cc_op = CC_OP_DYNAMIC;
7445 }
7446 gen_jmp_im(s->pc - s->cs_base);
7447 gen_helper_rsm();
7448 gen_eob(s);
7449 break;
7450 case 0x1b8: /* SSE4.2 popcnt */
7451 if ((prefixes & (PREFIX_REPZ | PREFIX_LOCK | PREFIX_REPNZ)) !=
7452 PREFIX_REPZ)
7453 goto illegal_op;
7454 if (!(s->cpuid_ext_features & CPUID_EXT_POPCNT))
7455 goto illegal_op;
7457 modrm = ldub_code(s->pc++);
7458 reg = ((modrm >> 3) & 7);
7460 if (s->prefix & PREFIX_DATA)
7461 ot = OT_WORD;
7462 else if (s->dflag != 2)
7463 ot = OT_LONG;
7464 else
7465 ot = OT_QUAD;
7467 gen_ldst_modrm(s, modrm, ot, OR_TMP0, 0);
7468 gen_helper_popcnt(cpu_T[0], cpu_T[0], tcg_const_i32(ot));
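/* the helper is also expected to compute the result flags (popcnt clears
   the arithmetic flags and sets ZF only for a zero source), hence the
   switch to CC_OP_EFLAGS below */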
7469 gen_op_mov_reg_T0(ot, reg);
7471 s->cc_op = CC_OP_EFLAGS;
7472 break;
7473 case 0x10e ... 0x10f:
7474 /* 3DNow! instructions, ignore prefixes */
7475 s->prefix &= ~(PREFIX_REPZ | PREFIX_REPNZ | PREFIX_DATA); /* fall through */
7476 case 0x110 ... 0x117:
7477 case 0x128 ... 0x12f:
7478 case 0x138 ... 0x13a:
7479 case 0x150 ... 0x177:
7480 case 0x17c ... 0x17f:
7481 case 0x1c2:
7482 case 0x1c4 ... 0x1c6:
7483 case 0x1d0 ... 0x1fe:
7484 gen_sse(s, b, pc_start, rex_r);
7485 break;
7486 default:
7487 goto illegal_op;
7488 }
7489 /* lock generation */
7490 if (s->prefix & PREFIX_LOCK)
7491 gen_helper_unlock();
7492 return s->pc;
7493 illegal_op:
7494 if (s->prefix & PREFIX_LOCK)
7495 gen_helper_unlock();
7496 /* XXX: ensure that no lock was generated */
7497 gen_exception(s, EXCP06_ILLOP, pc_start - s->cs_base);
7498 return s->pc;
7499 }
7501 void optimize_flags_init(void)
7502 {
7503 #if TCG_TARGET_REG_BITS == 32
7504 assert(sizeof(CCTable) == (1 << 3));
7505 #else
7506 assert(sizeof(CCTable) == (1 << 4));
7507 #endif
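/* sanity check: CCTable is presumably pinned to exactly two host pointers
   because generated code indexes cc_table with a shift instead of a multiply */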
7508 cpu_env = tcg_global_reg_new_ptr(TCG_AREG0, "env");
7509 cpu_cc_op = tcg_global_mem_new_i32(TCG_AREG0,
7510 offsetof(CPUState, cc_op), "cc_op");
7511 cpu_cc_src = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_src),
7512 "cc_src");
7513 cpu_cc_dst = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_dst),
7514 "cc_dst");
7515 cpu_cc_tmp = tcg_global_mem_new(TCG_AREG0, offsetof(CPUState, cc_tmp),
7516 "cc_tmp");
7518 /* register helpers */
7519 #define GEN_HELPER 2
7520 #include "helper.h"
7521 }
7523 /* generate intermediate code in gen_opc_buf and gen_opparam_buf for
7524 basic block 'tb'. If search_pc is TRUE, also generate PC
7525 information for each intermediate instruction. */
7526 static inline void gen_intermediate_code_internal(CPUState *env,
7527 TranslationBlock *tb,
7528 int search_pc)
7529 {
7530 DisasContext dc1, *dc = &dc1;
7531 target_ulong pc_ptr;
7532 uint16_t *gen_opc_end;
7533 CPUBreakpoint *bp;
7534 int j, lj, cflags;
7535 uint64_t flags;
7536 target_ulong pc_start;
7537 target_ulong cs_base;
7538 int num_insns;
7539 int max_insns;
7541 /* generate intermediate code */
7542 pc_start = tb->pc;
7543 cs_base = tb->cs_base;
7544 flags = tb->flags;
7545 cflags = tb->cflags;
7547 dc->pe = (flags >> HF_PE_SHIFT) & 1;
7548 dc->code32 = (flags >> HF_CS32_SHIFT) & 1;
7549 dc->ss32 = (flags >> HF_SS32_SHIFT) & 1;
7550 dc->addseg = (flags >> HF_ADDSEG_SHIFT) & 1;
7551 dc->f_st = 0;
7552 dc->vm86 = (flags >> VM_SHIFT) & 1;
7553 dc->cpl = (flags >> HF_CPL_SHIFT) & 3;
7554 dc->iopl = (flags >> IOPL_SHIFT) & 3;
7555 dc->tf = (flags >> TF_SHIFT) & 1;
7556 dc->singlestep_enabled = env->singlestep_enabled;
7557 dc->cc_op = CC_OP_DYNAMIC;
7558 dc->cs_base = cs_base;
7559 dc->tb = tb;
7560 dc->popl_esp_hack = 0;
7561 /* select memory access functions */
7562 dc->mem_index = 0;
7563 if (flags & HF_SOFTMMU_MASK) {
7564 if (dc->cpl == 3)
7565 dc->mem_index = 2 * 4;
7566 else
7567 dc->mem_index = 1 * 4;
7568 }
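/* mem_index selects the softmmu access functions: the MMU index (1 = kernel,
   2 = user) is scaled by 4 so the low bits can still carry the operand size;
   it stays 0 when there is no softmmu */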
7569 dc->cpuid_features = env->cpuid_features;
7570 dc->cpuid_ext_features = env->cpuid_ext_features;
7571 dc->cpuid_ext2_features = env->cpuid_ext2_features;
7572 dc->cpuid_ext3_features = env->cpuid_ext3_features;
7573 #ifdef TARGET_X86_64
7574 dc->lma = (flags >> HF_LMA_SHIFT) & 1;
7575 dc->code64 = (flags >> HF_CS64_SHIFT) & 1;
7576 #endif
7577 dc->flags = flags;
7578 dc->jmp_opt = !(dc->tf || env->singlestep_enabled ||
7579 (flags & HF_INHIBIT_IRQ_MASK)
7580 #ifndef CONFIG_SOFTMMU
7581 || (flags & HF_SOFTMMU_MASK)
7582 #endif
7583 );
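/* direct block chaining is only safe when nothing can force a stop after a
   single insn: no TF trap, no gdb single-step, no pending irq inhibit */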
7584 #if 0
7585 /* check addseg logic */
7586 if (!dc->addseg && (dc->vm86 || !dc->pe || !dc->code32))
7587 printf("ERROR addseg\n");
7588 #endif
7590 cpu_T[0] = tcg_temp_new();
7591 cpu_T[1] = tcg_temp_new();
7592 cpu_A0 = tcg_temp_new();
7593 cpu_T3 = tcg_temp_new();
7595 cpu_tmp0 = tcg_temp_new();
7596 cpu_tmp1_i64 = tcg_temp_new_i64();
7597 cpu_tmp2_i32 = tcg_temp_new_i32();
7598 cpu_tmp3_i32 = tcg_temp_new_i32();
7599 cpu_tmp4 = tcg_temp_new();
7600 cpu_tmp5 = tcg_temp_new();
7601 cpu_tmp6 = tcg_temp_new();
7602 cpu_ptr0 = tcg_temp_new_ptr();
7603 cpu_ptr1 = tcg_temp_new_ptr();
7605 gen_opc_end = gen_opc_buf + OPC_MAX_SIZE;
7607 dc->is_jmp = DISAS_NEXT;
7608 pc_ptr = pc_start;
7609 lj = -1;
7610 num_insns = 0;
7611 max_insns = tb->cflags & CF_COUNT_MASK;
7612 if (max_insns == 0)
7613 max_insns = CF_COUNT_MASK;
7615 gen_icount_start();
7616 for(;;) {
7617 if (unlikely(!TAILQ_EMPTY(&env->breakpoints))) {
7618 TAILQ_FOREACH(bp, &env->breakpoints, entry) {
7619 if (bp->pc == pc_ptr) {
7620 gen_debug(dc, pc_ptr - dc->cs_base);
7621 break;
7622 }
7623 }
7624 }
7625 if (search_pc) {
7626 j = gen_opc_ptr - gen_opc_buf;
7627 if (lj < j) {
7628 lj++;
7629 while (lj < j)
7630 gen_opc_instr_start[lj++] = 0;
7631 }
7632 gen_opc_pc[lj] = pc_ptr;
7633 gen_opc_cc_op[lj] = dc->cc_op;
7634 gen_opc_instr_start[lj] = 1;
7635 gen_opc_icount[lj] = num_insns;
7636 }
7637 if (num_insns + 1 == max_insns && (tb->cflags & CF_LAST_IO))
7638 gen_io_start();
7640 pc_ptr = disas_insn(dc, pc_ptr);
7641 num_insns++;
7642 /* stop translation if indicated */
7643 if (dc->is_jmp)
7644 break;
7645 /* in single-step mode, we generate only one instruction and
7646 then generate an exception */
7647 /* if irqs were inhibited with HF_INHIBIT_IRQ_MASK, we clear
7648 the flag and abort the translation to give the irqs a
7649 chance to happen */
7650 if (dc->tf || dc->singlestep_enabled ||
7651 (flags & HF_INHIBIT_IRQ_MASK)) {
7652 gen_jmp_im(pc_ptr - dc->cs_base);
7653 gen_eob(dc);
7654 break;
7655 }
7656 /* if too long translation, stop generation too */
7657 if (gen_opc_ptr >= gen_opc_end ||
7658 (pc_ptr - pc_start) >= (TARGET_PAGE_SIZE - 32) ||
7659 num_insns >= max_insns) {
7660 gen_jmp_im(pc_ptr - dc->cs_base);
7661 gen_eob(dc);
7662 break;
7663 }
7664 }
7665 if (tb->cflags & CF_LAST_IO)
7666 gen_io_end();
7667 gen_icount_end(tb, num_insns);
7668 *gen_opc_ptr = INDEX_op_end;
7669 /* make sure the last values are filled in */
7670 if (search_pc) {
7671 j = gen_opc_ptr - gen_opc_buf;
7672 lj++;
7673 while (lj <= j)
7674 gen_opc_instr_start[lj++] = 0;
7675 }
7677 #ifdef DEBUG_DISAS
7678 log_cpu_state_mask(CPU_LOG_TB_CPU, env, X86_DUMP_CCOP);
7679 if (qemu_loglevel_mask(CPU_LOG_TB_IN_ASM)) {
7680 int disas_flags;
7681 qemu_log("----------------\n");
7682 qemu_log("IN: %s\n", lookup_symbol(pc_start));
7683 #ifdef TARGET_X86_64
7684 if (dc->code64)
7685 disas_flags = 2;
7686 else
7687 #endif
7688 disas_flags = !dc->code32;
7689 log_target_disas(pc_start, pc_ptr - pc_start, disas_flags);
7690 qemu_log("\n");
7691 }
7692 #endif
7694 if (!search_pc) {
7695 tb->size = pc_ptr - pc_start;
7696 tb->icount = num_insns;
7697 }
7698 }
7700 void gen_intermediate_code(CPUState *env, TranslationBlock *tb)
7701 {
7702 gen_intermediate_code_internal(env, tb, 0);
7703 }
7705 void gen_intermediate_code_pc(CPUState *env, TranslationBlock *tb)
7706 {
7707 gen_intermediate_code_internal(env, tb, 1);
7708 }
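/* gen_pc_load below restores eip and cc_op for the insn containing
   searched_pc, using the gen_opc_* side tables filled in by the search_pc pass */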
7710 void gen_pc_load(CPUState *env, TranslationBlock *tb,
7711 unsigned long searched_pc, int pc_pos, void *puc)
7712 {
7713 int cc_op;
7714 #ifdef DEBUG_DISAS
7715 if (qemu_loglevel_mask(CPU_LOG_TB_OP)) {
7716 int i;
7717 qemu_log("RESTORE:\n");
7718 for(i = 0;i <= pc_pos; i++) {
7719 if (gen_opc_instr_start[i]) {
7720 qemu_log("0x%04x: " TARGET_FMT_lx "\n", i, gen_opc_pc[i]);
7721 }
7722 }
7723 qemu_log("spc=0x%08lx pc_pos=0x%x eip=" TARGET_FMT_lx " cs_base=%x\n",
7724 searched_pc, pc_pos, gen_opc_pc[pc_pos] - tb->cs_base,
7725 (uint32_t)tb->cs_base);
7726 }
7727 #endif
7728 env->eip = gen_opc_pc[pc_pos] - tb->cs_base;
7729 cc_op = gen_opc_cc_op[pc_pos];
7730 if (cc_op != CC_OP_DYNAMIC)
7731 env->cc_op = cc_op;
7732 }