/*
 *  Copyright (c) 2003 Fabrice Bellard
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"
#include "tcg/tcg-op.h"
#include "tcg/tcg-op-gvec.h"
#include "exec/translator.h"
#include "fpu/softfloat.h"

#include "exec/helper-proto.h"
#include "exec/helper-gen.h"
#include "helper-tcg.h"

#define HELPER_H "helper.h"
#include "exec/helper-info.c.inc"
/* Fixes for Windows namespace pollution.  */
#undef IN
#undef OUT

#define PREFIX_REPZ   0x01
#define PREFIX_REPNZ  0x02
#define PREFIX_LOCK   0x04
#define PREFIX_DATA   0x08
#define PREFIX_ADR    0x10
#define PREFIX_VEX    0x20
#define PREFIX_REX    0x40
/* For a switch indexed by MODRM, match all memory operands for a given OP.  */
#define CASE_MODRM_MEM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7

#define CASE_MODRM_OP(OP) \
    case (0 << 6) | (OP << 3) | 0 ... (0 << 6) | (OP << 3) | 7: \
    case (1 << 6) | (OP << 3) | 0 ... (1 << 6) | (OP << 3) | 7: \
    case (2 << 6) | (OP << 3) | 0 ... (2 << 6) | (OP << 3) | 7: \
    case (3 << 6) | (OP << 3) | 0 ... (3 << 6) | (OP << 3) | 7
//#define MACRO_TEST   1
/* global register indexes */
static TCGv cpu_cc_dst, cpu_cc_src, cpu_cc_src2;
static TCGv_i32 cpu_cc_op;
static TCGv cpu_regs[CPU_NB_REGS];
static TCGv cpu_seg_base[6];
static TCGv_i64 cpu_bndl[4];
static TCGv_i64 cpu_bndu[4];
typedef struct DisasContext {
    DisasContextBase base;

    target_ulong pc;       /* pc = eip + cs_base */
    target_ulong cs_base;  /* base of CS segment */

    int8_t override; /* -1 if no override, else R_CS, R_DS, etc */

#ifndef CONFIG_USER_ONLY
    uint8_t cpl;   /* code priv level */
    uint8_t iopl;  /* i/o priv level */
#endif
    uint8_t vex_l;  /* vex vector length */
    uint8_t vex_v;  /* vex vvvv register, without 1's complement. */
    uint8_t popl_esp_hack; /* for correct popl with esp base handling */
    uint8_t rip_offset; /* only used in x86_64, but left for simplicity */

    bool vex_w; /* used by AVX even on 32-bit processors */
    bool jmp_opt; /* use direct block chaining for direct jumps */
    bool repz_opt; /* optimize jumps within repz instructions */

    CCOp cc_op;  /* current CC operation */
    int mem_index; /* select memory access functions */
    uint32_t flags; /* all execution flags */

    int cpuid_ext_features;
    int cpuid_ext2_features;
    int cpuid_ext3_features;
    int cpuid_7_0_ebx_features;
    int cpuid_7_0_ecx_features;
    int cpuid_7_1_eax_features;
    int cpuid_xsave_features;

    /* TCG local temps */

    /* TCG local register indexes (only used inside old micro ops) */

    TCGOp *prev_insn_start;
    TCGOp *prev_insn_end;
} DisasContext;
/*
 * Point EIP to next instruction before ending translation.
 * For instructions that can change hflags.
 */
#define DISAS_EOB_NEXT         DISAS_TARGET_0

/*
 * Point EIP to next instruction and set HF_INHIBIT_IRQ if not
 * already set.  For instructions that activate interrupt shadow.
 */
#define DISAS_EOB_INHIBIT_IRQ  DISAS_TARGET_1

/*
 * Return to the main loop; EIP might have already been updated
 * but even in that case do not use lookup_and_goto_ptr().
 */
#define DISAS_EOB_ONLY         DISAS_TARGET_2

/*
 * EIP has already been updated.  For jumps that wish to use
 * lookup_and_goto_ptr()
 */
#define DISAS_JUMP             DISAS_TARGET_3

/*
 * EIP has already been updated.  Use updated value of
 * EFLAGS.TF to determine singlestep trap (SYSCALL/SYSRET).
 */
#define DISAS_EOB_RECHECK_TF   DISAS_TARGET_4
/* The environment in which user-only runs is constrained. */
#ifdef CONFIG_USER_ONLY
#define SVME(S)   false
#define GUEST(S)  false
#else
#define PE(S)     (((S)->flags & HF_PE_MASK) != 0)
#define CPL(S)    ((S)->cpl)
#define IOPL(S)   ((S)->iopl)
#define SVME(S)   (((S)->flags & HF_SVME_MASK) != 0)
#define GUEST(S)  (((S)->flags & HF_GUEST_MASK) != 0)
#endif
#if defined(CONFIG_USER_ONLY) && defined(TARGET_X86_64)
#define VM86(S)   false
#define CODE32(S) true
#define ADDSEG(S) false
#else
#define VM86(S)   (((S)->flags & HF_VM_MASK) != 0)
#define CODE32(S) (((S)->flags & HF_CS32_MASK) != 0)
#define SS32(S)   (((S)->flags & HF_SS32_MASK) != 0)
#define ADDSEG(S) (((S)->flags & HF_ADDSEG_MASK) != 0)
#endif

#if !defined(TARGET_X86_64)
#define CODE64(S) false
#elif defined(CONFIG_USER_ONLY)
#define CODE64(S) true
#else
#define CODE64(S) (((S)->flags & HF_CS64_MASK) != 0)
#endif

#if defined(CONFIG_USER_ONLY) || defined(TARGET_X86_64)
#define LMA(S)    (((S)->flags & HF_LMA_MASK) != 0)
#else
#define LMA(S)    false
#endif

#ifdef TARGET_X86_64
#define REX_PREFIX(S)  (((S)->prefix & PREFIX_REX) != 0)
#define REX_W(S)       ((S)->vex_w)
#define REX_R(S)       ((S)->rex_r + 0)
#define REX_X(S)       ((S)->rex_x + 0)
#define REX_B(S)       ((S)->rex_b + 0)
#else
#define REX_PREFIX(S)  false
#define REX_W(S)       false
#define REX_R(S)       0
#define REX_X(S)       0
#define REX_B(S)       0
#endif
/*
 * Many sysemu-only helpers are not reachable for user-only.
 * Define stub generators here, so that we need not either sprinkle
 * ifdefs through the translator, nor provide the helper function.
 */
#define STUB_HELPER(NAME, ...) \
    static inline void gen_helper_##NAME(__VA_ARGS__) \
    { qemu_build_not_reached(); }
#ifdef CONFIG_USER_ONLY
STUB_HELPER(clgi, TCGv_env env)
STUB_HELPER(flush_page, TCGv_env env, TCGv addr)
STUB_HELPER(inb, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inw, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(inl, TCGv ret, TCGv_env env, TCGv_i32 port)
STUB_HELPER(monitor, TCGv_env env, TCGv addr)
STUB_HELPER(mwait, TCGv_env env, TCGv_i32 pc_ofs)
STUB_HELPER(outb, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outw, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(outl, TCGv_env env, TCGv_i32 port, TCGv_i32 val)
STUB_HELPER(stgi, TCGv_env env)
STUB_HELPER(svm_check_intercept, TCGv_env env, TCGv_i32 type)
STUB_HELPER(vmload, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(vmmcall, TCGv_env env)
STUB_HELPER(vmrun, TCGv_env env, TCGv_i32 aflag, TCGv_i32 pc_ofs)
STUB_HELPER(vmsave, TCGv_env env, TCGv_i32 aflag)
STUB_HELPER(write_crN, TCGv_env env, TCGv_i32 reg, TCGv val)
#endif
static void gen_jmp_rel(DisasContext *s, MemOp ot, int diff, int tb_num);
static void gen_jmp_rel_csize(DisasContext *s, int diff, int tb_num);
static void gen_exception_gpf(DisasContext *s);
    OP_SHL1, /* undocumented */
/* Bit set if the global variable is live after setting CC_OP to X.  */
static const uint8_t cc_op_live[CC_OP_NB] = {
    [CC_OP_DYNAMIC] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_EFLAGS] = USES_CC_SRC,
    [CC_OP_MULB ... CC_OP_MULQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADDB ... CC_OP_ADDQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCB ... CC_OP_ADCQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_SUBB ... CC_OP_SUBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRCT,
    [CC_OP_SBBB ... CC_OP_SBBQ] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_LOGICB ... CC_OP_LOGICQ] = USES_CC_DST,
    [CC_OP_INCB ... CC_OP_INCQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_DECB ... CC_OP_DECQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SHLB ... CC_OP_SHLQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_SARB ... CC_OP_SARQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BMILGB ... CC_OP_BMILGQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_BLSIB ... CC_OP_BLSIQ] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADCX] = USES_CC_DST | USES_CC_SRC,
    [CC_OP_ADOX] = USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_ADCOX] = USES_CC_DST | USES_CC_SRC | USES_CC_SRC2,
    [CC_OP_POPCNT] = USES_CC_DST,
};
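/*
 * Record the CC_OP that will produce the flags at the end of the current
 * instruction.  Globals that the new CC_OP no longer uses are discarded,
 * and cc_op_dirty notes whether cpu_cc_op still has to be written back.
 */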
static void set_cc_op_1(DisasContext *s, CCOp op, bool dirty)
{
    int dead;

    if (s->cc_op == op) {
        return;
    }

    /* Discard CC computation that will no longer be used. */
    dead = cc_op_live[s->cc_op] & ~cc_op_live[op];
    if (dead & USES_CC_DST) {
        tcg_gen_discard_tl(cpu_cc_dst);
    }
    if (dead & USES_CC_SRC) {
        tcg_gen_discard_tl(cpu_cc_src);
    }
    if (dead & USES_CC_SRC2) {
        tcg_gen_discard_tl(cpu_cc_src2);
    }
    if (dead & USES_CC_SRCT) {
        tcg_gen_discard_tl(s->cc_srcT);
    }

    if (dirty && s->cc_op == CC_OP_DYNAMIC) {
        tcg_gen_discard_i32(cpu_cc_op);
    }
    s->cc_op = op;
    s->cc_op_dirty = dirty;
}
static void set_cc_op(DisasContext *s, CCOp op)
{
    /*
     * The DYNAMIC setting is translator only, everything else
     * will be spilled later.
     */
    set_cc_op_1(s, op, op != CC_OP_DYNAMIC);
}

static void assume_cc_op(DisasContext *s, CCOp op)
{
    set_cc_op_1(s, op, false);
}
static void gen_update_cc_op(DisasContext *s)
{
    if (s->cc_op_dirty) {
        tcg_gen_movi_i32(cpu_cc_op, s->cc_op);
        s->cc_op_dirty = false;
    }
}
#ifdef TARGET_X86_64
#define NB_OP_SIZES 4
#else /* !TARGET_X86_64 */
#define NB_OP_SIZES 3
#endif /* !TARGET_X86_64 */
#if HOST_BIG_ENDIAN
#define REG_B_OFFSET (sizeof(target_ulong) - 1)
#define REG_H_OFFSET (sizeof(target_ulong) - 2)
#define REG_W_OFFSET (sizeof(target_ulong) - 2)
#define REG_L_OFFSET (sizeof(target_ulong) - 4)
#define REG_LH_OFFSET (sizeof(target_ulong) - 8)
#else
#define REG_B_OFFSET 0
#define REG_H_OFFSET 1
#define REG_W_OFFSET 0
#define REG_L_OFFSET 0
#define REG_LH_OFFSET 4
#endif
/* In instruction encodings for byte register accesses the
 * register number usually indicates "low 8 bits of register N";
 * however there are some special cases where N 4..7 indicates
 * [AH, CH, DH, BH], ie "bits 15..8 of register N-4".  Return
 * true for this special case, false otherwise.
 */
static inline bool byte_reg_is_xH(DisasContext *s, int reg)
{
    /* Any time the REX prefix is present, byte registers are uniform */
    if (reg < 4 || REX_PREFIX(s)) {
        return false;
    }
    return true;
}
/* Select the size of a push/pop operation.  */
static inline MemOp mo_pushpop(DisasContext *s, MemOp ot)
{
    if (CODE64(s)) {
        return ot == MO_16 ? MO_16 : MO_64;
    } else {
        return ot;
    }
}
/* Select the size of the stack pointer.  */
static inline MemOp mo_stacksize(DisasContext *s)
{
    return CODE64(s) ? MO_64 : SS32(s) ? MO_32 : MO_16;
}
/* Compute the result of writing t0 to the OT-sized register REG.
 *
 * If DEST is NULL, store the result into the register and return the
 * register's TCGv.
 *
 * If DEST is not NULL, store the result into DEST and still return
 * the register's TCGv.
 */
static TCGv gen_op_deposit_reg_v(DisasContext *s, MemOp ot, int reg,
                                 TCGv dest, TCGv t0)
{
    switch (ot) {
    case MO_8:
        if (byte_reg_is_xH(s, reg)) {
            dest = dest ? dest : cpu_regs[reg - 4];
            tcg_gen_deposit_tl(dest, cpu_regs[reg - 4], t0, 8, 8);
            return cpu_regs[reg - 4];
        }
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 8);
        break;
    case MO_16:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_deposit_tl(dest, cpu_regs[reg], t0, 0, 16);
        break;
    case MO_32:
        /* For x86_64, this sets the higher half of register to zero.
           For i386, this is equivalent to a mov. */
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_ext32u_tl(dest, t0);
        break;
#ifdef TARGET_X86_64
    case MO_64:
        dest = dest ? dest : cpu_regs[reg];
        tcg_gen_mov_tl(dest, t0);
        break;
#endif
    default:
        g_assert_not_reached();
    }
    return cpu_regs[reg];
}
static void gen_op_mov_reg_v(DisasContext *s, MemOp ot, int reg, TCGv t0)
{
    gen_op_deposit_reg_v(s, ot, reg, NULL, t0);
}
void gen_op_mov_v_reg(DisasContext *s, MemOp ot, TCGv t0, int reg)
{
    if (ot == MO_8 && byte_reg_is_xH(s, reg)) {
        tcg_gen_extract_tl(t0, cpu_regs[reg - 4], 8, 8);
    } else {
        tcg_gen_mov_tl(t0, cpu_regs[reg]);
    }
}
static void gen_add_A0_im(DisasContext *s, int val)
{
    tcg_gen_addi_tl(s->A0, s->A0, val);
    if (!CODE64(s)) {
        tcg_gen_ext32u_tl(s->A0, s->A0);
    }
}
static inline void gen_op_jmp_v(DisasContext *s, TCGv dest)
{
    tcg_gen_mov_tl(cpu_eip, dest);
}
void gen_op_add_reg_im(DisasContext *s, MemOp size, int reg, int32_t val)
{
    tcg_gen_addi_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
static inline void gen_op_add_reg(DisasContext *s, MemOp size, int reg, TCGv val)
{
    tcg_gen_add_tl(s->tmp0, cpu_regs[reg], val);
    gen_op_mov_reg_v(s, size, reg, s->tmp0);
}
static inline void gen_op_ld_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_ld_tl(t0, a0, s->mem_index, idx | MO_LE);
}
static inline void gen_op_st_v(DisasContext *s, int idx, TCGv t0, TCGv a0)
{
    tcg_gen_qemu_st_tl(t0, a0, s->mem_index, idx | MO_LE);
}
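/*
 * Store into cpu_eip the address of the instruction following the one
 * being translated.  With CF_PCREL the value is built relative to the
 * last EIP written (s->pc_save); otherwise an absolute value is used,
 * truncated to 32 bits outside of 64-bit mode.
 */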
static void gen_update_eip_next(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->pc - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->pc);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->pc - s->cs_base));
    }
}
static void gen_update_eip_cur(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        tcg_gen_addi_tl(cpu_eip, cpu_eip, s->base.pc_next - s->pc_save);
    } else if (CODE64(s)) {
        tcg_gen_movi_tl(cpu_eip, s->base.pc_next);
    } else {
        tcg_gen_movi_tl(cpu_eip, (uint32_t)(s->base.pc_next - s->cs_base));
    }
    s->pc_save = s->base.pc_next;
}
static int cur_insn_len(DisasContext *s)
{
    return s->pc - s->base.pc_next;
}
static TCGv_i32 cur_insn_len_i32(DisasContext *s)
{
    return tcg_constant_i32(cur_insn_len(s));
}
static TCGv_i32 eip_next_i32(DisasContext *s)
{
    assert(s->pc_save != -1);
    /*
     * This function has two users: lcall_real (always 16-bit mode), and
     * iret_protected (16, 32, or 64-bit mode).  IRET only uses the value
     * when EFLAGS.NT is set, which is illegal in 64-bit mode, which is
     * why passing a 32-bit value isn't broken.  To avoid using this where
     * we shouldn't, return -1 in 64-bit mode so that execution goes into
     * the emulator's error path instead.
     */
    if (CODE64(s)) {
        return tcg_constant_i32(-1);
    }
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv_i32 ret = tcg_temp_new_i32();
        tcg_gen_trunc_tl_i32(ret, cpu_eip);
        tcg_gen_addi_i32(ret, ret, s->pc - s->pc_save);
        return ret;
    } else {
        return tcg_constant_i32(s->pc - s->cs_base);
    }
}
static TCGv eip_next_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->pc - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->pc);
    } else {
        return tcg_constant_tl((uint32_t)(s->pc - s->cs_base));
    }
}
static TCGv eip_cur_tl(DisasContext *s)
{
    assert(s->pc_save != -1);
    if (tb_cflags(s->base.tb) & CF_PCREL) {
        TCGv ret = tcg_temp_new();
        tcg_gen_addi_tl(ret, cpu_eip, s->base.pc_next - s->pc_save);
        return ret;
    } else if (CODE64(s)) {
        return tcg_constant_tl(s->base.pc_next);
    } else {
        return tcg_constant_tl((uint32_t)(s->base.pc_next - s->cs_base));
    }
}
604 /* Compute SEG:REG into DEST. SEG is selected from the override segment
605 (OVR_SEG) and the default segment (DEF_SEG). OVR_SEG may be -1 to
606 indicate no override. */
607 static void gen_lea_v_seg_dest(DisasContext
*s
, MemOp aflag
, TCGv dest
, TCGv a0
,
608 int def_seg
, int ovr_seg
)
614 tcg_gen_mov_tl(dest
, a0
);
621 if (ovr_seg
< 0 && ADDSEG(s
)) {
625 tcg_gen_ext32u_tl(dest
, a0
);
631 tcg_gen_ext16u_tl(dest
, a0
);
642 g_assert_not_reached();
646 TCGv seg
= cpu_seg_base
[ovr_seg
];
648 if (aflag
== MO_64
) {
649 tcg_gen_add_tl(dest
, a0
, seg
);
650 } else if (CODE64(s
)) {
651 tcg_gen_ext32u_tl(dest
, a0
);
652 tcg_gen_add_tl(dest
, dest
, seg
);
654 tcg_gen_add_tl(dest
, a0
, seg
);
655 tcg_gen_ext32u_tl(dest
, dest
);
static void gen_lea_v_seg(DisasContext *s, TCGv a0,
                          int def_seg, int ovr_seg)
{
    gen_lea_v_seg_dest(s, s->aflag, s->A0, a0, def_seg, ovr_seg);
}
static inline void gen_string_movl_A0_ESI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_ESI], R_DS, s->override);
}
static inline void gen_string_movl_A0_EDI(DisasContext *s)
{
    gen_lea_v_seg(s, cpu_regs[R_EDI], R_ES, -1);
}
static inline TCGv gen_compute_Dshift(DisasContext *s, MemOp ot)
{
    TCGv dshift = tcg_temp_new();

    tcg_gen_ld32s_tl(dshift, tcg_env, offsetof(CPUX86State, df));
    tcg_gen_shli_tl(dshift, dshift, ot);
    return dshift;
}
static TCGv gen_ext_tl(TCGv dst, TCGv src, MemOp size, bool sign)
{
    if (!dst) {
        dst = tcg_temp_new();
    }
    tcg_gen_ext_tl(dst, src, size | (sign ? MO_SIGN : 0));
    return dst;
}
static void gen_exts(MemOp ot, TCGv reg)
{
    gen_ext_tl(reg, reg, ot, true);
}
static void gen_op_j_ecx(DisasContext *s, TCGCond cond, TCGLabel *label1)
{
    TCGv tmp = gen_ext_tl(NULL, cpu_regs[R_ECX], s->aflag, false);

    tcg_gen_brcondi_tl(cond, tmp, 0, label1);
}
static inline void gen_op_jz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_EQ, label1);
}
static inline void gen_op_jnz_ecx(DisasContext *s, TCGLabel *label1)
{
    gen_op_j_ecx(s, TCG_COND_NE, label1);
}
static void gen_helper_in_func(MemOp ot, TCGv v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_inb(v, tcg_env, n);
        break;
    case MO_16:
        gen_helper_inw(v, tcg_env, n);
        break;
    case MO_32:
        gen_helper_inl(v, tcg_env, n);
        break;
    default:
        g_assert_not_reached();
    }
}
static void gen_helper_out_func(MemOp ot, TCGv_i32 v, TCGv_i32 n)
{
    switch (ot) {
    case MO_8:
        gen_helper_outb(tcg_env, v, n);
        break;
    case MO_16:
        gen_helper_outw(tcg_env, v, n);
        break;
    case MO_32:
        gen_helper_outl(tcg_env, v, n);
        break;
    default:
        g_assert_not_reached();
    }
}
753 * Validate that access to [port, port + 1<<ot) is allowed.
754 * Raise #GP, or VMM exit if not.
756 static bool gen_check_io(DisasContext
*s
, MemOp ot
, TCGv_i32 port
,
759 #ifdef CONFIG_USER_ONLY
761 * We do not implement the ioperm(2) syscall, so the TSS check
764 gen_exception_gpf(s
);
767 if (PE(s
) && (CPL(s
) > IOPL(s
) || VM86(s
))) {
768 gen_helper_check_io(tcg_env
, port
, tcg_constant_i32(1 << ot
));
772 gen_update_eip_cur(s
);
773 if (s
->prefix
& (PREFIX_REPZ
| PREFIX_REPNZ
)) {
774 svm_flags
|= SVM_IOIO_REP_MASK
;
776 svm_flags
|= 1 << (SVM_IOIO_SIZE_SHIFT
+ ot
);
777 gen_helper_svm_check_io(tcg_env
, port
,
778 tcg_constant_i32(svm_flags
),
779 cur_insn_len_i32(s
));
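/*
 * Generate the body of one MOVS iteration: load from DS:ESI (honoring a
 * segment override), store to ES:EDI, then advance both index registers
 * by the direction-flag shift.
 */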
static void gen_movs(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
/* compute all eflags to reg */
static void gen_mov_eflags(DisasContext *s, TCGv reg)
{
    TCGv dst, src1, src2;
    TCGv_i32 cc_op;
    int live, dead;

    if (s->cc_op == CC_OP_EFLAGS) {
        tcg_gen_mov_tl(reg, cpu_cc_src);
        return;
    }
    if (s->cc_op == CC_OP_CLR) {
        tcg_gen_movi_tl(reg, CC_Z | CC_P);
        return;
    }

    dst = cpu_cc_dst;
    src1 = cpu_cc_src;
    src2 = cpu_cc_src2;

    /* Take care to not read values that are not live. */
    live = cc_op_live[s->cc_op] & ~USES_CC_SRCT;
    dead = live ^ (USES_CC_DST | USES_CC_SRC | USES_CC_SRC2);
    if (dead) {
        TCGv zero = tcg_constant_tl(0);
        if (dead & USES_CC_DST) {
            dst = zero;
        }
        if (dead & USES_CC_SRC) {
            src1 = zero;
        }
        if (dead & USES_CC_SRC2) {
            src2 = zero;
        }
    }

    if (s->cc_op != CC_OP_DYNAMIC) {
        cc_op = tcg_constant_i32(s->cc_op);
    } else {
        cc_op = cpu_cc_op;
    }
    gen_helper_cc_compute_all(reg, dst, src1, src2, cc_op);
}
/* compute all eflags to cc_src */
static void gen_compute_eflags(DisasContext *s)
{
    gen_mov_eflags(s, cpu_cc_src);
    set_cc_op(s, CC_OP_EFLAGS);
}
typedef struct CCPrepare {
859 static CCPrepare
gen_prepare_sign_nz(TCGv src
, MemOp size
)
862 return (CCPrepare
) { .cond
= TCG_COND_LT
, .reg
= src
};
864 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= src
,
865 .imm
= 1ull << ((8 << size
) - 1) };
869 static CCPrepare
gen_prepare_val_nz(TCGv src
, MemOp size
, bool eqz
)
872 return (CCPrepare
) { .cond
= eqz
? TCG_COND_EQ
: TCG_COND_NE
,
875 return (CCPrepare
) { .cond
= eqz
? TCG_COND_TSTEQ
: TCG_COND_TSTNE
,
876 .imm
= MAKE_64BIT_MASK(0, 8 << size
),
881 /* compute eflags.C, trying to store it in reg if not NULL */
882 static CCPrepare
gen_prepare_eflags_c(DisasContext
*s
, TCGv reg
)
887 case CC_OP_SUBB
... CC_OP_SUBQ
:
888 /* (DATA_TYPE)CC_SRCT < (DATA_TYPE)CC_SRC */
889 size
= s
->cc_op
- CC_OP_SUBB
;
890 gen_ext_tl(s
->cc_srcT
, s
->cc_srcT
, size
, false);
891 gen_ext_tl(cpu_cc_src
, cpu_cc_src
, size
, false);
892 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= s
->cc_srcT
,
893 .reg2
= cpu_cc_src
, .use_reg2
= true };
895 case CC_OP_ADDB
... CC_OP_ADDQ
:
896 /* (DATA_TYPE)CC_DST < (DATA_TYPE)CC_SRC */
897 size
= s
->cc_op
- CC_OP_ADDB
;
898 gen_ext_tl(cpu_cc_dst
, cpu_cc_dst
, size
, false);
899 gen_ext_tl(cpu_cc_src
, cpu_cc_src
, size
, false);
900 return (CCPrepare
) { .cond
= TCG_COND_LTU
, .reg
= cpu_cc_dst
,
901 .reg2
= cpu_cc_src
, .use_reg2
= true };
903 case CC_OP_LOGICB
... CC_OP_LOGICQ
:
906 return (CCPrepare
) { .cond
= TCG_COND_NEVER
};
908 case CC_OP_INCB
... CC_OP_INCQ
:
909 case CC_OP_DECB
... CC_OP_DECQ
:
910 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
,
911 .no_setcond
= true };
913 case CC_OP_SHLB
... CC_OP_SHLQ
:
914 /* (CC_SRC >> (DATA_BITS - 1)) & 1 */
915 size
= s
->cc_op
- CC_OP_SHLB
;
916 return gen_prepare_sign_nz(cpu_cc_src
, size
);
918 case CC_OP_MULB
... CC_OP_MULQ
:
919 return (CCPrepare
) { .cond
= TCG_COND_NE
,
922 case CC_OP_BMILGB
... CC_OP_BMILGQ
:
923 size
= s
->cc_op
- CC_OP_BMILGB
;
924 return gen_prepare_val_nz(cpu_cc_src
, size
, true);
926 case CC_OP_BLSIB
... CC_OP_BLSIQ
:
927 size
= s
->cc_op
- CC_OP_BLSIB
;
928 return gen_prepare_val_nz(cpu_cc_src
, size
, false);
932 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_dst
,
933 .no_setcond
= true };
936 case CC_OP_SARB
... CC_OP_SARQ
:
938 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
,
939 .reg
= cpu_cc_src
, .imm
= CC_C
};
942 /* The need to compute only C from CC_OP_DYNAMIC is important
943 in efficiently implementing e.g. INC at the start of a TB. */
946 reg
= tcg_temp_new();
948 gen_helper_cc_compute_c(reg
, cpu_cc_dst
, cpu_cc_src
,
949 cpu_cc_src2
, cpu_cc_op
);
950 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= reg
,
951 .no_setcond
= true };
955 /* compute eflags.P, trying to store it in reg if not NULL */
956 static CCPrepare
gen_prepare_eflags_p(DisasContext
*s
, TCGv reg
)
958 gen_compute_eflags(s
);
959 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= cpu_cc_src
,
963 /* compute eflags.S, trying to store it in reg if not NULL */
964 static CCPrepare
gen_prepare_eflags_s(DisasContext
*s
, TCGv reg
)
968 gen_compute_eflags(s
);
974 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= cpu_cc_src
,
978 return (CCPrepare
) { .cond
= TCG_COND_NEVER
};
981 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
982 return gen_prepare_sign_nz(cpu_cc_dst
, size
);
987 /* compute eflags.O, trying to store it in reg if not NULL */
988 static CCPrepare
gen_prepare_eflags_o(DisasContext
*s
, TCGv reg
)
993 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src2
,
994 .no_setcond
= true };
997 return (CCPrepare
) { .cond
= TCG_COND_NEVER
};
998 case CC_OP_MULB
... CC_OP_MULQ
:
999 return (CCPrepare
) { .cond
= TCG_COND_NE
, .reg
= cpu_cc_src
};
1001 gen_compute_eflags(s
);
1002 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= cpu_cc_src
,
1007 /* compute eflags.Z, trying to store it in reg if not NULL */
1008 static CCPrepare
gen_prepare_eflags_z(DisasContext
*s
, TCGv reg
)
1012 gen_compute_eflags(s
);
1018 return (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= cpu_cc_src
,
1021 return (CCPrepare
) { .cond
= TCG_COND_ALWAYS
};
1024 MemOp size
= (s
->cc_op
- CC_OP_ADDB
) & 3;
1025 return gen_prepare_val_nz(cpu_cc_dst
, size
, true);
1030 /* return how to compute jump opcode 'b'. 'reg' can be clobbered
1031 * if needed; it may be used for CCPrepare.reg if that will
1032 * provide more freedom in the translation of a subsequent setcond. */
1033 static CCPrepare
gen_prepare_cc(DisasContext
*s
, int b
, TCGv reg
)
1035 int inv
, jcc_op
, cond
;
1040 jcc_op
= (b
>> 1) & 7;
1043 case CC_OP_SUBB
... CC_OP_SUBQ
:
1044 /* We optimize relational operators for the cmp/jcc case. */
1045 size
= s
->cc_op
- CC_OP_SUBB
;
1048 gen_ext_tl(s
->cc_srcT
, s
->cc_srcT
, size
, false);
1049 gen_ext_tl(cpu_cc_src
, cpu_cc_src
, size
, false);
1050 cc
= (CCPrepare
) { .cond
= TCG_COND_LEU
, .reg
= s
->cc_srcT
,
1051 .reg2
= cpu_cc_src
, .use_reg2
= true };
1059 gen_ext_tl(s
->cc_srcT
, s
->cc_srcT
, size
, true);
1060 gen_ext_tl(cpu_cc_src
, cpu_cc_src
, size
, true);
1061 cc
= (CCPrepare
) { .cond
= cond
, .reg
= s
->cc_srcT
,
1062 .reg2
= cpu_cc_src
, .use_reg2
= true };
1072 /* This actually generates good code for JC, JZ and JS. */
1075 cc
= gen_prepare_eflags_o(s
, reg
);
1078 cc
= gen_prepare_eflags_c(s
, reg
);
1081 cc
= gen_prepare_eflags_z(s
, reg
);
1084 gen_compute_eflags(s
);
1085 cc
= (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= cpu_cc_src
,
1086 .imm
= CC_Z
| CC_C
};
1089 cc
= gen_prepare_eflags_s(s
, reg
);
1092 cc
= gen_prepare_eflags_p(s
, reg
);
1095 gen_compute_eflags(s
);
1096 if (!reg
|| reg
== cpu_cc_src
) {
1097 reg
= tcg_temp_new();
1099 tcg_gen_addi_tl(reg
, cpu_cc_src
, CC_O
- CC_S
);
1100 cc
= (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= reg
,
1105 gen_compute_eflags(s
);
1106 if (!reg
|| reg
== cpu_cc_src
) {
1107 reg
= tcg_temp_new();
1109 tcg_gen_addi_tl(reg
, cpu_cc_src
, CC_O
- CC_S
);
1110 cc
= (CCPrepare
) { .cond
= TCG_COND_TSTNE
, .reg
= reg
,
1111 .imm
= CC_O
| CC_Z
};
1118 cc
.cond
= tcg_invert_cond(cc
.cond
);
static void gen_setcc1(DisasContext *s, int b, TCGv reg)
{
    CCPrepare cc = gen_prepare_cc(s, b, reg);

    if (cc.no_setcond) {
        if (cc.cond == TCG_COND_EQ) {
            tcg_gen_xori_tl(reg, cc.reg, 1);
        } else {
            tcg_gen_mov_tl(reg, cc.reg);
        }
        return;
    }

    if (cc.use_reg2) {
        tcg_gen_setcond_tl(cc.cond, reg, cc.reg, cc.reg2);
    } else {
        tcg_gen_setcondi_tl(cc.cond, reg, cc.reg, cc.imm);
    }
}
static inline void gen_compute_eflags_c(DisasContext *s, TCGv reg)
{
    gen_setcc1(s, JCC_B << 1, reg);
}
/* generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used. */
static inline void gen_jcc1_noeob(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
/* Generate a conditional jump to label 'l1' according to jump opcode
   value 'b'. In the fast case, T0 is guaranteed not to be used.
   One or both of the branches will call gen_jmp_rel, so ensure
   cc_op is clean. */
static inline void gen_jcc1(DisasContext *s, int b, TCGLabel *l1)
{
    CCPrepare cc = gen_prepare_cc(s, b, NULL);

    gen_update_cc_op(s);
    if (cc.use_reg2) {
        tcg_gen_brcond_tl(cc.cond, cc.reg, cc.reg2, l1);
    } else {
        tcg_gen_brcondi_tl(cc.cond, cc.reg, cc.imm, l1);
    }
}
1177 /* XXX: does not work with gdbstub "ice" single step - not a
1178 serious problem. The caller can jump to the returned label
1179 to stop the REP but, if the flags have changed, it has to call
1180 gen_update_cc_op before doing so. */
1181 static TCGLabel
*gen_jz_ecx_string(DisasContext
*s
)
1183 TCGLabel
*l1
= gen_new_label();
1184 TCGLabel
*l2
= gen_new_label();
1186 gen_update_cc_op(s
);
1187 gen_op_jnz_ecx(s
, l1
);
1189 gen_jmp_rel_csize(s
, 0, 1);
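/*
 * Body of one STOS iteration: store T0 to ES:EDI and advance EDI by the
 * direction-flag shift.
 */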
static void gen_stos(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
static void gen_lods(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    gen_op_mov_reg_v(s, ot, R_EAX, s->T0);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
}
static void gen_scas(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
}
static void gen_cmps(DisasContext *s, MemOp ot)
{
    TCGv dshift;

    gen_string_movl_A0_EDI(s);
    gen_op_ld_v(s, ot, s->T1, s->A0);
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);
    tcg_gen_mov_tl(cpu_cc_src, s->T1);
    tcg_gen_mov_tl(s->cc_srcT, s->T0);
    tcg_gen_sub_tl(cpu_cc_dst, s->T0, s->T1);
    set_cc_op(s, CC_OP_SUBB + ot);

    dshift = gen_compute_Dshift(s, ot);
    gen_op_add_reg(s, s->aflag, R_ESI, dshift);
    gen_op_add_reg(s, s->aflag, R_EDI, dshift);
}
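/*
 * If I/O breakpoints are active (HF_IOBPT_MASK), call the bpt_io helper
 * so that a debug exception can be raised after the access completes.
 */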
static void gen_bpt_io(DisasContext *s, TCGv_i32 t_port, int ot)
{
    if (s->flags & HF_IOBPT_MASK) {
#ifdef CONFIG_USER_ONLY
        /* user-mode cpu should not be in IOBPT mode */
        g_assert_not_reached();
#else
        TCGv_i32 t_size = tcg_constant_i32(1 << ot);
        TCGv t_next = eip_next_tl(s);
        gen_helper_bpt_io(tcg_env, t_port, t_size, t_next);
#endif /* CONFIG_USER_ONLY */
    }
}
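/*
 * Body of one INS iteration: read from the port in DX into T0 and store
 * it at ES:EDI.  A dummy store is emitted first so that a fault on the
 * destination is taken before the port is actually read.
 */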
static void gen_ins(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_EDI(s);
    /* Note: we must do this dummy write first to be restartable in
       case of page fault. */
    tcg_gen_movi_tl(s->T0, 0);
    gen_op_st_v(s, ot, s->T0, s->A0);
    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    gen_helper_in_func(ot, s->T0, s->tmp2_i32);
    gen_op_st_v(s, ot, s->T0, s->A0);
    gen_op_add_reg(s, s->aflag, R_EDI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
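/*
 * Body of one OUTS iteration: load T0 from DS:ESI (honoring a segment
 * override) and write it to the port in DX, then advance ESI.
 */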
static void gen_outs(DisasContext *s, MemOp ot)
{
    gen_string_movl_A0_ESI(s);
    gen_op_ld_v(s, ot, s->T0, s->A0);

    tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_EDX]);
    tcg_gen_andi_i32(s->tmp2_i32, s->tmp2_i32, 0xffff);
    tcg_gen_trunc_tl_i32(s->tmp3_i32, s->T0);
    gen_helper_out_func(ot, s->tmp2_i32, s->tmp3_i32);
    gen_op_add_reg(s, s->aflag, R_ESI, gen_compute_Dshift(s, ot));
    gen_bpt_io(s, s->tmp2_i32, ot);
}
1281 /* Generate jumps to current or next instruction */
1282 static void gen_repz(DisasContext
*s
, MemOp ot
,
1283 void (*fn
)(DisasContext
*s
, MemOp ot
))
1286 l2
= gen_jz_ecx_string(s
);
1288 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1290 * A loop would cause two single step exceptions if ECX = 1
1291 * before rep string_insn
1294 gen_op_jz_ecx(s
, l2
);
1296 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1299 static void gen_repz_nz(DisasContext
*s
, MemOp ot
,
1300 void (*fn
)(DisasContext
*s
, MemOp ot
))
1303 int nz
= (s
->prefix
& PREFIX_REPNZ
) ? 1 : 0;
1305 l2
= gen_jz_ecx_string(s
);
1307 gen_op_add_reg_im(s
, s
->aflag
, R_ECX
, -1);
1308 gen_jcc1(s
, (JCC_Z
<< 1) | (nz
^ 1), l2
);
1310 gen_op_jz_ecx(s
, l2
);
1313 * Only one iteration is done at a time, so the translation
1314 * block ends unconditionally after this instruction and there
1315 * is no control flow junction - no need to set CC_OP_DYNAMIC.
1317 gen_jmp_rel_csize(s
, -cur_insn_len(s
), 0);
1320 static void gen_helper_fp_arith_ST0_FT0(int op
)
1324 gen_helper_fadd_ST0_FT0(tcg_env
);
1327 gen_helper_fmul_ST0_FT0(tcg_env
);
1330 gen_helper_fcom_ST0_FT0(tcg_env
);
1333 gen_helper_fcom_ST0_FT0(tcg_env
);
1336 gen_helper_fsub_ST0_FT0(tcg_env
);
1339 gen_helper_fsubr_ST0_FT0(tcg_env
);
1342 gen_helper_fdiv_ST0_FT0(tcg_env
);
1345 gen_helper_fdivr_ST0_FT0(tcg_env
);
1350 /* NOTE the exception in "r" op ordering */
1351 static void gen_helper_fp_arith_STN_ST0(int op
, int opreg
)
1353 TCGv_i32 tmp
= tcg_constant_i32(opreg
);
1356 gen_helper_fadd_STN_ST0(tcg_env
, tmp
);
1359 gen_helper_fmul_STN_ST0(tcg_env
, tmp
);
1362 gen_helper_fsubr_STN_ST0(tcg_env
, tmp
);
1365 gen_helper_fsub_STN_ST0(tcg_env
, tmp
);
1368 gen_helper_fdivr_STN_ST0(tcg_env
, tmp
);
1371 gen_helper_fdiv_STN_ST0(tcg_env
, tmp
);
static void gen_exception(DisasContext *s, int trapno)
{
    gen_update_cc_op(s);
    gen_update_eip_cur(s);
    gen_helper_raise_exception(tcg_env, tcg_constant_i32(trapno));
    s->base.is_jmp = DISAS_NORETURN;
}
/* Generate #UD for the current instruction.  The assumption here is that
   the instruction is known, but it isn't allowed in the current cpu mode.  */
static void gen_illegal_opcode(DisasContext *s)
{
    gen_exception(s, EXCP06_ILLOP);
}
/* Generate #GP for the current instruction. */
static void gen_exception_gpf(DisasContext *s)
{
    gen_exception(s, EXCP0D_GPF);
}
/* Check for cpl == 0; if not, raise #GP and return false. */
static bool check_cpl0(DisasContext *s)
{
    if (CPL(s) == 0) {
        return true;
    }
    gen_exception_gpf(s);
    return false;
}
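/*
 * Double-width shift (SHLD/SHRD): shift T0 by COUNT, feeding in bits
 * from T1.  The 16-bit case follows the Intel behaviour for counts
 * larger than the operand width.
 */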
1407 /* XXX: add faster immediate case */
1408 static void gen_shiftd_rm_T1(DisasContext
*s
, MemOp ot
,
1409 bool is_right
, TCGv count
)
1411 target_ulong mask
= (ot
== MO_64
? 63 : 31);
1415 /* Note: we implement the Intel behaviour for shift count > 16.
1416 This means "shrdw C, B, A" shifts A:B:A >> C. Build the B:A
1417 portion by constructing it as a 32-bit value. */
1419 tcg_gen_deposit_tl(s
->tmp0
, s
->T0
, s
->T1
, 16, 16);
1420 tcg_gen_mov_tl(s
->T1
, s
->T0
);
1421 tcg_gen_mov_tl(s
->T0
, s
->tmp0
);
1423 tcg_gen_deposit_tl(s
->T1
, s
->T0
, s
->T1
, 16, 16);
1426 * If TARGET_X86_64 defined then fall through into MO_32 case,
1427 * otherwise fall through default case.
1430 #ifdef TARGET_X86_64
1431 /* Concatenate the two 32-bit values and use a 64-bit shift. */
1432 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1434 tcg_gen_concat_tl_i64(s
->T0
, s
->T0
, s
->T1
);
1435 tcg_gen_shr_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1436 tcg_gen_shr_i64(s
->T0
, s
->T0
, count
);
1438 tcg_gen_concat_tl_i64(s
->T0
, s
->T1
, s
->T0
);
1439 tcg_gen_shl_i64(s
->tmp0
, s
->T0
, s
->tmp0
);
1440 tcg_gen_shl_i64(s
->T0
, s
->T0
, count
);
1441 tcg_gen_shri_i64(s
->tmp0
, s
->tmp0
, 32);
1442 tcg_gen_shri_i64(s
->T0
, s
->T0
, 32);
1447 tcg_gen_subi_tl(s
->tmp0
, count
, 1);
1449 tcg_gen_shr_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1451 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1452 tcg_gen_shr_tl(s
->T0
, s
->T0
, count
);
1453 tcg_gen_shl_tl(s
->T1
, s
->T1
, s
->tmp4
);
1455 tcg_gen_shl_tl(s
->tmp0
, s
->T0
, s
->tmp0
);
1457 /* Only needed if count > 16, for Intel behaviour. */
1458 tcg_gen_subfi_tl(s
->tmp4
, 33, count
);
1459 tcg_gen_shr_tl(s
->tmp4
, s
->T1
, s
->tmp4
);
1460 tcg_gen_or_tl(s
->tmp0
, s
->tmp0
, s
->tmp4
);
1463 tcg_gen_subfi_tl(s
->tmp4
, mask
+ 1, count
);
1464 tcg_gen_shl_tl(s
->T0
, s
->T0
, count
);
1465 tcg_gen_shr_tl(s
->T1
, s
->T1
, s
->tmp4
);
1467 tcg_gen_movi_tl(s
->tmp4
, 0);
1468 tcg_gen_movcond_tl(TCG_COND_EQ
, s
->T1
, count
, s
->tmp4
,
1470 tcg_gen_or_tl(s
->T0
, s
->T0
, s
->T1
);
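/*
 * x86 instruction encodings, including prefixes, are architecturally
 * limited to 15 bytes; advance_pc() below enforces this while fetching.
 */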
#define X86_MAX_INSN_LENGTH 15

static uint64_t advance_pc(CPUX86State *env, DisasContext *s, int num_bytes)
{
    uint64_t pc = s->pc;

    /* This is a subsequent insn that crosses a page boundary.  */
    if (s->base.num_insns > 1 &&
        !is_same_page(&s->base, s->pc + num_bytes - 1)) {
        siglongjmp(s->jmpbuf, 2);
    }

    s->pc += num_bytes;
    if (unlikely(cur_insn_len(s) > X86_MAX_INSN_LENGTH)) {
        /* If the instruction's 16th byte is on a different page than the 1st, a
         * page fault on the second page wins over the general protection fault
         * caused by the instruction being too long.
         * This can happen even if the operand is only one byte long!
         */
        if (((s->pc - 1) ^ (pc - 1)) & TARGET_PAGE_MASK) {
            (void)translator_ldub(env, &s->base,
                                  (s->pc - 1) & TARGET_PAGE_MASK);
        }
        siglongjmp(s->jmpbuf, 1);
    }

    return pc;
}
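/* Fetch helpers: read 1/2/4/8 code bytes and advance the decode PC. */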
static inline uint8_t x86_ldub_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldub(env, &s->base, advance_pc(env, s, 1));
}

static inline uint16_t x86_lduw_code(CPUX86State *env, DisasContext *s)
{
    return translator_lduw(env, &s->base, advance_pc(env, s, 2));
}

static inline uint32_t x86_ldl_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldl(env, &s->base, advance_pc(env, s, 4));
}

#ifdef TARGET_X86_64
static inline uint64_t x86_ldq_code(CPUX86State *env, DisasContext *s)
{
    return translator_ldq(env, &s->base, advance_pc(env, s, 8));
}
#endif
1526 /* Decompose an address. */
1528 typedef struct AddressParts
{
1536 static AddressParts
gen_lea_modrm_0(CPUX86State
*env
, DisasContext
*s
,
1537 int modrm
, bool is_vsib
)
1539 int def_seg
, base
, index
, scale
, mod
, rm
;
1548 mod
= (modrm
>> 6) & 3;
1550 base
= rm
| REX_B(s
);
1553 /* Normally filtered out earlier, but including this path
1554 simplifies multi-byte nop, as well as bndcl, bndcu, bndcn. */
1563 int code
= x86_ldub_code(env
, s
);
1564 scale
= (code
>> 6) & 3;
1565 index
= ((code
>> 3) & 7) | REX_X(s
);
1566 if (index
== 4 && !is_vsib
) {
1567 index
= -1; /* no index */
1569 base
= (code
& 7) | REX_B(s
);
1575 if ((base
& 7) == 5) {
1577 disp
= (int32_t)x86_ldl_code(env
, s
);
1578 if (CODE64(s
) && !havesib
) {
1580 disp
+= s
->pc
+ s
->rip_offset
;
1585 disp
= (int8_t)x86_ldub_code(env
, s
);
1589 disp
= (int32_t)x86_ldl_code(env
, s
);
1593 /* For correct popl handling with esp. */
1594 if (base
== R_ESP
&& s
->popl_esp_hack
) {
1595 disp
+= s
->popl_esp_hack
;
1597 if (base
== R_EBP
|| base
== R_ESP
) {
1606 disp
= x86_lduw_code(env
, s
);
1609 } else if (mod
== 1) {
1610 disp
= (int8_t)x86_ldub_code(env
, s
);
1612 disp
= (int16_t)x86_lduw_code(env
, s
);
1652 g_assert_not_reached();
1656 return (AddressParts
){ def_seg
, base
, index
, scale
, disp
};
1659 /* Compute the address, with a minimum number of TCG ops. */
1660 static TCGv
gen_lea_modrm_1(DisasContext
*s
, AddressParts a
, bool is_vsib
)
1664 if (a
.index
>= 0 && !is_vsib
) {
1666 ea
= cpu_regs
[a
.index
];
1668 tcg_gen_shli_tl(s
->A0
, cpu_regs
[a
.index
], a
.scale
);
1672 tcg_gen_add_tl(s
->A0
, ea
, cpu_regs
[a
.base
]);
1675 } else if (a
.base
>= 0) {
1676 ea
= cpu_regs
[a
.base
];
1679 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& a
.base
== -2) {
1680 /* With cpu_eip ~= pc_save, the expression is pc-relative. */
1681 tcg_gen_addi_tl(s
->A0
, cpu_eip
, a
.disp
- s
->pc_save
);
1683 tcg_gen_movi_tl(s
->A0
, a
.disp
);
1686 } else if (a
.disp
!= 0) {
1687 tcg_gen_addi_tl(s
->A0
, ea
, a
.disp
);
1694 static void gen_lea_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1696 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
, false);
1697 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
1698 gen_lea_v_seg(s
, ea
, a
.def_seg
, s
->override
);
1701 static void gen_nop_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
)
1703 (void)gen_lea_modrm_0(env
, s
, modrm
, false);
1706 /* Used for BNDCL, BNDCU, BNDCN. */
1707 static void gen_bndck(CPUX86State
*env
, DisasContext
*s
, int modrm
,
1708 TCGCond cond
, TCGv_i64 bndv
)
1710 AddressParts a
= gen_lea_modrm_0(env
, s
, modrm
, false);
1711 TCGv ea
= gen_lea_modrm_1(s
, a
, false);
1713 tcg_gen_extu_tl_i64(s
->tmp1_i64
, ea
);
1715 tcg_gen_ext32u_i64(s
->tmp1_i64
, s
->tmp1_i64
);
1717 tcg_gen_setcond_i64(cond
, s
->tmp1_i64
, s
->tmp1_i64
, bndv
);
1718 tcg_gen_extrl_i64_i32(s
->tmp2_i32
, s
->tmp1_i64
);
1719 gen_helper_bndck(tcg_env
, s
->tmp2_i32
);
1722 /* generate modrm load of memory or register. */
1723 static void gen_ld_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
, MemOp ot
)
1727 mod
= (modrm
>> 6) & 3;
1728 rm
= (modrm
& 7) | REX_B(s
);
1730 gen_op_mov_v_reg(s
, ot
, s
->T0
, rm
);
1732 gen_lea_modrm(env
, s
, modrm
);
1733 gen_op_ld_v(s
, ot
, s
->T0
, s
->A0
);
1737 /* generate modrm store of memory or register. */
1738 static void gen_st_modrm(CPUX86State
*env
, DisasContext
*s
, int modrm
, MemOp ot
)
1742 mod
= (modrm
>> 6) & 3;
1743 rm
= (modrm
& 7) | REX_B(s
);
1745 gen_op_mov_reg_v(s
, ot
, rm
, s
->T0
);
1747 gen_lea_modrm(env
, s
, modrm
);
1748 gen_op_st_v(s
, ot
, s
->T0
, s
->A0
);
1752 static target_ulong
insn_get_addr(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
1758 ret
= x86_ldub_code(env
, s
);
1761 ret
= x86_lduw_code(env
, s
);
1764 ret
= x86_ldl_code(env
, s
);
1766 #ifdef TARGET_X86_64
1768 ret
= x86_ldq_code(env
, s
);
1772 g_assert_not_reached();
1777 static inline uint32_t insn_get(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
1783 ret
= x86_ldub_code(env
, s
);
1786 ret
= x86_lduw_code(env
, s
);
1789 #ifdef TARGET_X86_64
1792 ret
= x86_ldl_code(env
, s
);
1795 g_assert_not_reached();
1800 static target_long
insn_get_signed(CPUX86State
*env
, DisasContext
*s
, MemOp ot
)
1806 ret
= (int8_t) x86_ldub_code(env
, s
);
1809 ret
= (int16_t) x86_lduw_code(env
, s
);
1812 ret
= (int32_t) x86_ldl_code(env
, s
);
1814 #ifdef TARGET_X86_64
1816 ret
= x86_ldq_code(env
, s
);
1820 g_assert_not_reached();
1825 static void gen_conditional_jump_labels(DisasContext
*s
, target_long diff
,
1826 TCGLabel
*not_taken
, TCGLabel
*taken
)
1829 gen_set_label(not_taken
);
1831 gen_jmp_rel_csize(s
, 0, 1);
1833 gen_set_label(taken
);
1834 gen_jmp_rel(s
, s
->dflag
, diff
, 0);
1837 static void gen_jcc(DisasContext
*s
, int b
, int diff
)
1839 TCGLabel
*l1
= gen_new_label();
1842 gen_conditional_jump_labels(s
, diff
, NULL
, l1
);
1845 static void gen_cmovcc1(DisasContext
*s
, int b
, TCGv dest
, TCGv src
)
1847 CCPrepare cc
= gen_prepare_cc(s
, b
, NULL
);
1850 cc
.reg2
= tcg_constant_tl(cc
.imm
);
1853 tcg_gen_movcond_tl(cc
.cond
, dest
, cc
.reg
, cc
.reg2
, src
, dest
);
1856 static void gen_op_movl_seg_real(DisasContext
*s
, X86Seg seg_reg
, TCGv seg
)
1858 TCGv selector
= tcg_temp_new();
1859 tcg_gen_ext16u_tl(selector
, seg
);
1860 tcg_gen_st32_tl(selector
, tcg_env
,
1861 offsetof(CPUX86State
,segs
[seg_reg
].selector
));
1862 tcg_gen_shli_tl(cpu_seg_base
[seg_reg
], selector
, 4);
1865 /* move SRC to seg_reg and compute if the CPU state may change. Never
1866 call this function with seg_reg == R_CS */
1867 static void gen_movl_seg(DisasContext
*s
, X86Seg seg_reg
, TCGv src
)
1869 if (PE(s
) && !VM86(s
)) {
1870 tcg_gen_trunc_tl_i32(s
->tmp2_i32
, src
);
1871 gen_helper_load_seg(tcg_env
, tcg_constant_i32(seg_reg
), s
->tmp2_i32
);
1872 /* abort translation because the addseg value may change or
1873 because ss32 may change. For R_SS, translation must always
1874 stop as a special handling must be done to disable hardware
1875 interrupts for the next instruction */
1876 if (seg_reg
== R_SS
) {
1877 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
1878 } else if (CODE32(s
) && seg_reg
< R_FS
) {
1879 s
->base
.is_jmp
= DISAS_EOB_NEXT
;
1882 gen_op_movl_seg_real(s
, seg_reg
, src
);
1883 if (seg_reg
== R_SS
) {
1884 s
->base
.is_jmp
= DISAS_EOB_INHIBIT_IRQ
;
1889 static void gen_far_call(DisasContext
*s
)
1891 TCGv_i32 new_cs
= tcg_temp_new_i32();
1892 tcg_gen_trunc_tl_i32(new_cs
, s
->T1
);
1893 if (PE(s
) && !VM86(s
)) {
1894 gen_helper_lcall_protected(tcg_env
, new_cs
, s
->T0
,
1895 tcg_constant_i32(s
->dflag
- 1),
1898 TCGv_i32 new_eip
= tcg_temp_new_i32();
1899 tcg_gen_trunc_tl_i32(new_eip
, s
->T0
);
1900 gen_helper_lcall_real(tcg_env
, new_cs
, new_eip
,
1901 tcg_constant_i32(s
->dflag
- 1),
1904 s
->base
.is_jmp
= DISAS_JUMP
;
1907 static void gen_far_jmp(DisasContext
*s
)
1909 if (PE(s
) && !VM86(s
)) {
1910 TCGv_i32 new_cs
= tcg_temp_new_i32();
1911 tcg_gen_trunc_tl_i32(new_cs
, s
->T1
);
1912 gen_helper_ljmp_protected(tcg_env
, new_cs
, s
->T0
,
1915 gen_op_movl_seg_real(s
, R_CS
, s
->T1
);
1916 gen_op_jmp_v(s
, s
->T0
);
1918 s
->base
.is_jmp
= DISAS_JUMP
;
1921 static void gen_svm_check_intercept(DisasContext
*s
, uint32_t type
)
1923 /* no SVM activated; fast case */
1924 if (likely(!GUEST(s
))) {
1927 gen_helper_svm_check_intercept(tcg_env
, tcg_constant_i32(type
));
1930 static inline void gen_stack_update(DisasContext
*s
, int addend
)
1932 gen_op_add_reg_im(s
, mo_stacksize(s
), R_ESP
, addend
);
1935 static void gen_lea_ss_ofs(DisasContext
*s
, TCGv dest
, TCGv src
, target_ulong offset
)
1938 tcg_gen_addi_tl(dest
, src
, offset
);
1941 gen_lea_v_seg_dest(s
, mo_stacksize(s
), dest
, src
, R_SS
, -1);
1944 /* Generate a push. It depends on ss32, addseg and dflag. */
1945 static void gen_push_v(DisasContext
*s
, TCGv val
)
1947 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
1948 MemOp a_ot
= mo_stacksize(s
);
1949 int size
= 1 << d_ot
;
1950 TCGv new_esp
= tcg_temp_new();
1952 tcg_gen_subi_tl(new_esp
, cpu_regs
[R_ESP
], size
);
1954 /* Now reduce the value to the address size and apply SS base. */
1955 gen_lea_ss_ofs(s
, s
->A0
, new_esp
, 0);
1956 gen_op_st_v(s
, d_ot
, val
, s
->A0
);
1957 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, new_esp
);
1960 /* two step pop is necessary for precise exceptions */
1961 static MemOp
gen_pop_T0(DisasContext
*s
)
1963 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
1965 gen_lea_ss_ofs(s
, s
->T0
, cpu_regs
[R_ESP
], 0);
1966 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->T0
);
1971 static inline void gen_pop_update(DisasContext
*s
, MemOp ot
)
1973 gen_stack_update(s
, 1 << ot
);
1976 static void gen_pusha(DisasContext
*s
)
1978 MemOp d_ot
= s
->dflag
;
1979 int size
= 1 << d_ot
;
1982 for (i
= 0; i
< 8; i
++) {
1983 gen_lea_ss_ofs(s
, s
->A0
, cpu_regs
[R_ESP
], (i
- 8) * size
);
1984 gen_op_st_v(s
, d_ot
, cpu_regs
[7 - i
], s
->A0
);
1987 gen_stack_update(s
, -8 * size
);
1990 static void gen_popa(DisasContext
*s
)
1992 MemOp d_ot
= s
->dflag
;
1993 int size
= 1 << d_ot
;
1996 for (i
= 0; i
< 8; i
++) {
1997 /* ESP is not reloaded */
1998 if (7 - i
== R_ESP
) {
2001 gen_lea_ss_ofs(s
, s
->A0
, cpu_regs
[R_ESP
], i
* size
);
2002 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2003 gen_op_mov_reg_v(s
, d_ot
, 7 - i
, s
->T0
);
2006 gen_stack_update(s
, 8 * size
);
2009 static void gen_enter(DisasContext
*s
, int esp_addend
, int level
)
2011 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2012 MemOp a_ot
= mo_stacksize(s
);
2013 int size
= 1 << d_ot
;
2015 /* Push BP; compute FrameTemp into T1. */
2016 tcg_gen_subi_tl(s
->T1
, cpu_regs
[R_ESP
], size
);
2017 gen_lea_ss_ofs(s
, s
->A0
, s
->T1
, 0);
2018 gen_op_st_v(s
, d_ot
, cpu_regs
[R_EBP
], s
->A0
);
2024 /* Copy level-1 pointers from the previous frame. */
2025 for (i
= 1; i
< level
; ++i
) {
2026 gen_lea_ss_ofs(s
, s
->A0
, cpu_regs
[R_EBP
], -size
* i
);
2027 gen_op_ld_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2029 gen_lea_ss_ofs(s
, s
->A0
, s
->T1
, -size
* i
);
2030 gen_op_st_v(s
, d_ot
, s
->tmp0
, s
->A0
);
2033 /* Push the current FrameTemp as the last level. */
2034 gen_lea_ss_ofs(s
, s
->A0
, s
->T1
, -size
* level
);
2035 gen_op_st_v(s
, d_ot
, s
->T1
, s
->A0
);
2038 /* Copy the FrameTemp value to EBP. */
2039 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T1
);
2041 /* Compute the final value of ESP. */
2042 tcg_gen_subi_tl(s
->T1
, s
->T1
, esp_addend
+ size
* level
);
2043 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2046 static void gen_leave(DisasContext
*s
)
2048 MemOp d_ot
= mo_pushpop(s
, s
->dflag
);
2049 MemOp a_ot
= mo_stacksize(s
);
2051 gen_lea_ss_ofs(s
, s
->A0
, cpu_regs
[R_EBP
], 0);
2052 gen_op_ld_v(s
, d_ot
, s
->T0
, s
->A0
);
2054 tcg_gen_addi_tl(s
->T1
, cpu_regs
[R_EBP
], 1 << d_ot
);
2056 gen_op_mov_reg_v(s
, d_ot
, R_EBP
, s
->T0
);
2057 gen_op_mov_reg_v(s
, a_ot
, R_ESP
, s
->T1
);
2060 /* Similarly, except that the assumption here is that we don't decode
2061 the instruction at all -- either a missing opcode, an unimplemented
2062 feature, or just a bogus instruction stream. */
2063 static void gen_unknown_opcode(CPUX86State
*env
, DisasContext
*s
)
2065 gen_illegal_opcode(s
);
2067 if (qemu_loglevel_mask(LOG_UNIMP
)) {
2068 FILE *logfile
= qemu_log_trylock();
2070 target_ulong pc
= s
->base
.pc_next
, end
= s
->pc
;
2072 fprintf(logfile
, "ILLOPC: " TARGET_FMT_lx
":", pc
);
2073 for (; pc
< end
; ++pc
) {
2074 fprintf(logfile
, " %02x", translator_ldub(env
, &s
->base
, pc
));
2076 fprintf(logfile
, "\n");
2077 qemu_log_unlock(logfile
);
2082 /* an interrupt is different from an exception because of the
2084 static void gen_interrupt(DisasContext
*s
, uint8_t intno
)
2086 gen_update_cc_op(s
);
2087 gen_update_eip_cur(s
);
2088 gen_helper_raise_interrupt(tcg_env
, tcg_constant_i32(intno
),
2089 cur_insn_len_i32(s
));
2090 s
->base
.is_jmp
= DISAS_NORETURN
;
2093 static void gen_set_hflag(DisasContext
*s
, uint32_t mask
)
2095 if ((s
->flags
& mask
) == 0) {
2096 TCGv_i32 t
= tcg_temp_new_i32();
2097 tcg_gen_ld_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2098 tcg_gen_ori_i32(t
, t
, mask
);
2099 tcg_gen_st_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2104 static void gen_reset_hflag(DisasContext
*s
, uint32_t mask
)
2106 if (s
->flags
& mask
) {
2107 TCGv_i32 t
= tcg_temp_new_i32();
2108 tcg_gen_ld_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2109 tcg_gen_andi_i32(t
, t
, ~mask
);
2110 tcg_gen_st_i32(t
, tcg_env
, offsetof(CPUX86State
, hflags
));
2115 static void gen_set_eflags(DisasContext
*s
, target_ulong mask
)
2117 TCGv t
= tcg_temp_new();
2119 tcg_gen_ld_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2120 tcg_gen_ori_tl(t
, t
, mask
);
2121 tcg_gen_st_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2124 static void gen_reset_eflags(DisasContext
*s
, target_ulong mask
)
2126 TCGv t
= tcg_temp_new();
2128 tcg_gen_ld_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2129 tcg_gen_andi_tl(t
, t
, ~mask
);
2130 tcg_gen_st_tl(t
, tcg_env
, offsetof(CPUX86State
, eflags
));
2133 /* Clear BND registers during legacy branches. */
2134 static void gen_bnd_jmp(DisasContext
*s
)
2136 /* Clear the registers only if BND prefix is missing, MPX is enabled,
2137 and if the BNDREGs are known to be in use (non-zero) already.
2138 The helper itself will check BNDPRESERVE at runtime. */
2139 if ((s
->prefix
& PREFIX_REPNZ
) == 0
2140 && (s
->flags
& HF_MPX_EN_MASK
) != 0
2141 && (s
->flags
& HF_MPX_IU_MASK
) != 0) {
2142 gen_helper_bnd_jmp(tcg_env
);
2147 * Generate an end of block, including common tasks such as generating
2148 * single step traps, resetting the RF flag, and handling the interrupt
2152 gen_eob(DisasContext
*s
, int mode
)
2156 gen_update_cc_op(s
);
2158 /* If several instructions disable interrupts, only the first does it. */
2159 inhibit_reset
= false;
2160 if (s
->flags
& HF_INHIBIT_IRQ_MASK
) {
2161 gen_reset_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2162 inhibit_reset
= true;
2163 } else if (mode
== DISAS_EOB_INHIBIT_IRQ
) {
2164 gen_set_hflag(s
, HF_INHIBIT_IRQ_MASK
);
2167 if (s
->base
.tb
->flags
& HF_RF_MASK
) {
2168 gen_reset_eflags(s
, RF_MASK
);
2170 if (mode
== DISAS_EOB_RECHECK_TF
) {
2171 gen_helper_rechecking_single_step(tcg_env
);
2172 tcg_gen_exit_tb(NULL
, 0);
2173 } else if ((s
->flags
& HF_TF_MASK
) && mode
!= DISAS_EOB_INHIBIT_IRQ
) {
2174 gen_helper_single_step(tcg_env
);
2175 } else if (mode
== DISAS_JUMP
&&
2176 /* give irqs a chance to happen */
2178 tcg_gen_lookup_and_goto_ptr();
2180 tcg_gen_exit_tb(NULL
, 0);
2183 s
->base
.is_jmp
= DISAS_NORETURN
;
2186 /* Jump to eip+diff, truncating the result to OT. */
2187 static void gen_jmp_rel(DisasContext
*s
, MemOp ot
, int diff
, int tb_num
)
2189 bool use_goto_tb
= s
->jmp_opt
;
2190 target_ulong mask
= -1;
2191 target_ulong new_pc
= s
->pc
+ diff
;
2192 target_ulong new_eip
= new_pc
- s
->cs_base
;
2194 assert(!s
->cc_op_dirty
);
2196 /* In 64-bit mode, operand size is fixed at 64 bits. */
2200 if (tb_cflags(s
->base
.tb
) & CF_PCREL
&& CODE32(s
)) {
2201 use_goto_tb
= false;
2209 if (tb_cflags(s
->base
.tb
) & CF_PCREL
) {
2210 tcg_gen_addi_tl(cpu_eip
, cpu_eip
, new_pc
- s
->pc_save
);
2212 * If we can prove the branch does not leave the page and we have
2213 * no extra masking to apply (data16 branch in code32, see above),
2214 * then we have also proven that the addition does not wrap.
2216 if (!use_goto_tb
|| !is_same_page(&s
->base
, new_pc
)) {
2217 tcg_gen_andi_tl(cpu_eip
, cpu_eip
, mask
);
2218 use_goto_tb
= false;
2220 } else if (!CODE64(s
)) {
2221 new_pc
= (uint32_t)(new_eip
+ s
->cs_base
);
2224 if (use_goto_tb
&& translator_use_goto_tb(&s
->base
, new_pc
)) {
2225 /* jump to same page: we can use a direct jump */
2226 tcg_gen_goto_tb(tb_num
);
2227 if (!(tb_cflags(s
->base
.tb
) & CF_PCREL
)) {
2228 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2230 tcg_gen_exit_tb(s
->base
.tb
, tb_num
);
2231 s
->base
.is_jmp
= DISAS_NORETURN
;
2233 if (!(tb_cflags(s
->base
.tb
) & CF_PCREL
)) {
2234 tcg_gen_movi_tl(cpu_eip
, new_eip
);
2237 gen_eob(s
, DISAS_JUMP
); /* jump to another page */
2239 gen_eob(s
, DISAS_EOB_ONLY
); /* exit to main loop */
2244 /* Jump to eip+diff, truncating to the current code size. */
2245 static void gen_jmp_rel_csize(DisasContext
*s
, int diff
, int tb_num
)
2247 /* CODE64 ignores the OT argument, so we need not consider it. */
2248 gen_jmp_rel(s
, CODE32(s
) ? MO_32
: MO_16
, diff
, tb_num
);
2251 static inline void gen_ldq_env_A0(DisasContext
*s
, int offset
)
2253 tcg_gen_qemu_ld_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2254 tcg_gen_st_i64(s
->tmp1_i64
, tcg_env
, offset
);
2257 static inline void gen_stq_env_A0(DisasContext
*s
, int offset
)
2259 tcg_gen_ld_i64(s
->tmp1_i64
, tcg_env
, offset
);
2260 tcg_gen_qemu_st_i64(s
->tmp1_i64
, s
->A0
, s
->mem_index
, MO_LEUQ
);
2263 static inline void gen_ldo_env_A0(DisasContext
*s
, int offset
, bool align
)
2265 MemOp atom
= (s
->cpuid_ext_features
& CPUID_EXT_AVX
2266 ? MO_ATOM_IFALIGN
: MO_ATOM_IFALIGN_PAIR
);
2267 MemOp mop
= MO_128
| MO_LE
| atom
| (align
? MO_ALIGN_16
: 0);
2268 int mem_index
= s
->mem_index
;
2269 TCGv_i128 t
= tcg_temp_new_i128();
2271 tcg_gen_qemu_ld_i128(t
, s
->A0
, mem_index
, mop
);
2272 tcg_gen_st_i128(t
, tcg_env
, offset
);
2275 static inline void gen_sto_env_A0(DisasContext
*s
, int offset
, bool align
)
2277 MemOp atom
= (s
->cpuid_ext_features
& CPUID_EXT_AVX
2278 ? MO_ATOM_IFALIGN
: MO_ATOM_IFALIGN_PAIR
);
2279 MemOp mop
= MO_128
| MO_LE
| atom
| (align
? MO_ALIGN_16
: 0);
2280 int mem_index
= s
->mem_index
;
2281 TCGv_i128 t
= tcg_temp_new_i128();
2283 tcg_gen_ld_i128(t
, tcg_env
, offset
);
2284 tcg_gen_qemu_st_i128(t
, s
->A0
, mem_index
, mop
);
2287 static void gen_ldy_env_A0(DisasContext
*s
, int offset
, bool align
)
2289 MemOp mop
= MO_128
| MO_LE
| MO_ATOM_IFALIGN_PAIR
;
2290 int mem_index
= s
->mem_index
;
2291 TCGv_i128 t0
= tcg_temp_new_i128();
2292 TCGv_i128 t1
= tcg_temp_new_i128();
2294 tcg_gen_qemu_ld_i128(t0
, s
->A0
, mem_index
, mop
| (align
? MO_ALIGN_32
: 0));
2295 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2296 tcg_gen_qemu_ld_i128(t1
, s
->tmp0
, mem_index
, mop
);
2298 tcg_gen_st_i128(t0
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_X(0)));
2299 tcg_gen_st_i128(t1
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_X(1)));
2302 static void gen_sty_env_A0(DisasContext
*s
, int offset
, bool align
)
2304 MemOp mop
= MO_128
| MO_LE
| MO_ATOM_IFALIGN_PAIR
;
2305 int mem_index
= s
->mem_index
;
2306 TCGv_i128 t
= tcg_temp_new_i128();
2308 tcg_gen_ld_i128(t
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_X(0)));
2309 tcg_gen_qemu_st_i128(t
, s
->A0
, mem_index
, mop
| (align
? MO_ALIGN_32
: 0));
2310 tcg_gen_addi_tl(s
->tmp0
, s
->A0
, 16);
2311 tcg_gen_ld_i128(t
, tcg_env
, offset
+ offsetof(YMMReg
, YMM_X(1)));
2312 tcg_gen_qemu_st_i128(t
, s
->tmp0
, mem_index
, mop
);
static void gen_cmpxchg8b(DisasContext *s, CPUX86State *env, int modrm)
{
    TCGv_i64 cmp, val, old;
    TCGv Z;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i64();
    val = tcg_temp_new_i64();
    old = tcg_temp_new_i64();

    /* Construct the comparison values from the register pair. */
    tcg_gen_concat_tl_i64(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_tl_i64(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i64(old, s->A0, cmp, val, s->mem_index, MO_TEUQ);
    } else {
        tcg_gen_nonatomic_cmpxchg_i64(old, s->A0, cmp, val,
                                      s->mem_index, MO_TEUQ);
    }

    /* Set tmp0 to match the required value of Z. */
    tcg_gen_setcond_i64(TCG_COND_EQ, cmp, old, cmp);
    Z = tcg_temp_new();
    tcg_gen_trunc_i64_tl(Z, cmp);

    /*
     * Extract the result values for the register pair.
     * For 32-bit, we may do this unconditionally, because on success (Z=1),
     * the old value matches the previous value in EDX:EAX.  For x86_64,
     * the store must be conditional, because we must leave the source
     * registers unchanged on success, and zero-extend the writeback
     * on failure (Z=0).
     */
    if (TARGET_LONG_BITS == 32) {
        tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], old);
    } else {
        TCGv zero = tcg_constant_tl(0);

        tcg_gen_extr_i64_tl(s->T0, s->T1, old);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EAX], Z, zero,
                           s->T0, cpu_regs[R_EAX]);
        tcg_gen_movcond_tl(TCG_COND_EQ, cpu_regs[R_EDX], Z, zero,
                           s->T1, cpu_regs[R_EDX]);
    }

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, Z, ctz32(CC_Z), 1);
}
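/*
 * CMPXCHG16B (64-bit mode only): same idea as CMPXCHG8B, but on an aligned
 * 16-byte operand, using RDX:RAX and RCX:RBX.
 */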
#ifdef TARGET_X86_64
static void gen_cmpxchg16b(DisasContext *s, CPUX86State *env, int modrm)
{
    MemOp mop = MO_TE | MO_128 | MO_ALIGN;
    TCGv_i64 t0, t1;
    TCGv_i128 cmp, val;

    gen_lea_modrm(env, s, modrm);

    cmp = tcg_temp_new_i128();
    val = tcg_temp_new_i128();
    tcg_gen_concat_i64_i128(cmp, cpu_regs[R_EAX], cpu_regs[R_EDX]);
    tcg_gen_concat_i64_i128(val, cpu_regs[R_EBX], cpu_regs[R_ECX]);

    /* Only require atomic with LOCK; non-parallel handled in generator. */
    if (s->prefix & PREFIX_LOCK) {
        tcg_gen_atomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    } else {
        tcg_gen_nonatomic_cmpxchg_i128(val, s->A0, cmp, val, s->mem_index, mop);
    }

    tcg_gen_extr_i128_i64(s->T0, s->T1, val);

    /* Determine success after the fact. */
    t0 = tcg_temp_new_i64();
    t1 = tcg_temp_new_i64();
    tcg_gen_xor_i64(t0, s->T0, cpu_regs[R_EAX]);
    tcg_gen_xor_i64(t1, s->T1, cpu_regs[R_EDX]);
    tcg_gen_or_i64(t0, t0, t1);

    /* Update Z. */
    gen_compute_eflags(s);
    tcg_gen_setcondi_i64(TCG_COND_EQ, t0, t0, 0);
    tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, t0, ctz32(CC_Z), 1);

    /*
     * Extract the result values for the register pair.  We may do this
     * unconditionally, because on success (Z=1), the old value matches
     * the previous value in RDX:RAX.
     */
    tcg_gen_mov_i64(cpu_regs[R_EAX], s->T0);
    tcg_gen_mov_i64(cpu_regs[R_EDX], s->T1);
}
#endif
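/*
 * Decode and translate one x87 FPU instruction (opcodes 0xd8-0xdf).
 * Returns false when the opcode/modrm combination is not recognised, so the
 * caller can treat it as an unknown opcode.
 */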
static bool disas_insn_x87(DisasContext *s, CPUState *cpu, int b)
{
    CPUX86State *env = cpu_env(cpu);
    bool update_fip = true;
    int modrm, mod, rm, op;

    if (s->flags & (HF_EM_MASK | HF_TS_MASK)) {
        /* if CR0.EM or CR0.TS are set, generate an FPU exception */
        /* XXX: what to do if illegal op ? */
        gen_exception(s, EXCP07_PREX);
        return true;
    }

    modrm = x86_ldub_code(env, s);
    mod = (modrm >> 6) & 3;
    rm = modrm & 7;
    op = ((b & 7) << 3) | ((modrm >> 3) & 7);

    if (mod != 3) {
        /* memory op */
        AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
        TCGv ea = gen_lea_modrm_1(s, a, false);
        TCGv last_addr = tcg_temp_new();
        bool update_fdp = true;

        tcg_gen_mov_tl(last_addr, ea);
        gen_lea_v_seg(s, ea, a.def_seg, s->override);

        switch (op) {
        case 0x00 ... 0x07: /* fxxxs */
        case 0x10 ... 0x17: /* fixxxl */
        case 0x20 ... 0x27: /* fxxxl */
        case 0x30 ... 0x37: /* fixxx */
            {
                int op1;
                op1 = op & 7;

                switch (op >> 4) {
                case 0:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_flds_FT0(tcg_env, s->tmp2_i32);
                    break;
                case 1:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                    break;
                case 2:
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fldl_FT0(tcg_env, s->tmp1_i64);
                    break;
                case 3:
                default:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LESW);
                    gen_helper_fildl_FT0(tcg_env, s->tmp2_i32);
                    break;
                }

                gen_helper_fp_arith_ST0_FT0(op1);
                if (op1 == 3) {
                    /* fcomp needs pop */
                    gen_helper_fpop(tcg_env);
                }
            }
            break;
        case 0x08: /* flds */
        case 0x0a: /* fsts */
        case 0x0b: /* fstps */
        case 0x18 ... 0x1b: /* fildl, fisttpl, fistl, fistpl */
        case 0x28 ... 0x2b: /* fldl, fisttpll, fstl, fstpl */
        case 0x38 ... 0x3b: /* filds, fisttps, fists, fistps */
            switch (op & 7) {
            case 0:
                switch (op >> 4) {
                case 0:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_flds_ST0(tcg_env, s->tmp2_i32);
                    break;
                case 1:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                    break;
                case 2:
                    tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    gen_helper_fldl_ST0(tcg_env, s->tmp1_i64);
                    break;
                case 3:
                default:
                    tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LESW);
                    gen_helper_fildl_ST0(tcg_env, s->tmp2_i32);
                    break;
                }
                break;
            case 1:
                /* XXX: the corresponding CPUID bit must be tested ! */
                switch (op >> 4) {
                case 1:
                    gen_helper_fisttl_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 2:
                    gen_helper_fisttll_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    break;
                case 3:
                default:
                    gen_helper_fistt_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    break;
                }
                gen_helper_fpop(tcg_env);
                break;
            default:
                switch (op >> 4) {
                case 0:
                    gen_helper_fsts_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 1:
                    gen_helper_fistl_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUL);
                    break;
                case 2:
                    gen_helper_fstl_ST0(s->tmp1_i64, tcg_env);
                    tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                        s->mem_index, MO_LEUQ);
                    break;
                case 3:
                default:
                    gen_helper_fist_ST0(s->tmp2_i32, tcg_env);
                    tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                        s->mem_index, MO_LEUW);
                    break;
                }
                if ((op & 7) == 3) {
                    gen_helper_fpop(tcg_env);
                }
                break;
            }
            break;
        case 0x0c: /* fldenv mem */
            gen_helper_fldenv(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x0d: /* fldcw mem */
            tcg_gen_qemu_ld_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            gen_helper_fldcw(tcg_env, s->tmp2_i32);
            update_fip = update_fdp = false;
            break;
        case 0x0e: /* fnstenv mem */
            gen_helper_fstenv(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x0f: /* fnstcw mem */
            gen_helper_fnstcw(s->tmp2_i32, tcg_env);
            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            update_fip = update_fdp = false;
            break;
        case 0x1d: /* fldt mem */
            gen_helper_fldt_ST0(tcg_env, s->A0);
            break;
        case 0x1f: /* fstpt mem */
            gen_helper_fstt_ST0(tcg_env, s->A0);
            gen_helper_fpop(tcg_env);
            break;
        case 0x2c: /* frstor mem */
            gen_helper_frstor(tcg_env, s->A0,
                              tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x2e: /* fnsave mem */
            gen_helper_fsave(tcg_env, s->A0,
                             tcg_constant_i32(s->dflag - 1));
            update_fip = update_fdp = false;
            break;
        case 0x2f: /* fnstsw mem */
            gen_helper_fnstsw(s->tmp2_i32, tcg_env);
            tcg_gen_qemu_st_i32(s->tmp2_i32, s->A0,
                                s->mem_index, MO_LEUW);
            update_fip = update_fdp = false;
            break;
        case 0x3c: /* fbld */
            gen_helper_fbld_ST0(tcg_env, s->A0);
            break;
        case 0x3e: /* fbstp */
            gen_helper_fbst_ST0(tcg_env, s->A0);
            gen_helper_fpop(tcg_env);
            break;
        case 0x3d: /* fildll */
            tcg_gen_qemu_ld_i64(s->tmp1_i64, s->A0,
                                s->mem_index, MO_LEUQ);
            gen_helper_fildll_ST0(tcg_env, s->tmp1_i64);
            break;
        case 0x3f: /* fistpll */
            gen_helper_fistll_ST0(s->tmp1_i64, tcg_env);
            tcg_gen_qemu_st_i64(s->tmp1_i64, s->A0,
                                s->mem_index, MO_LEUQ);
            gen_helper_fpop(tcg_env);
            break;
        default:
            return false;
        }

        if (update_fdp) {
            int last_seg = s->override >= 0 ? s->override : a.def_seg;

            tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                           offsetof(CPUX86State,
                                    segs[last_seg].selector));
            tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                             offsetof(CPUX86State, fpds));
            tcg_gen_st_tl(last_addr, tcg_env,
                          offsetof(CPUX86State, fpdp));
        }
    } else {
        /* register float ops */
        int opreg = rm;

        switch (op) {
        case 0x08: /* fld sti */
            gen_helper_fpush(tcg_env);
            gen_helper_fmov_ST0_STN(tcg_env,
                                    tcg_constant_i32((opreg + 1) & 7));
            break;
        case 0x09: /* fxchg sti */
        case 0x29: /* fxchg4 sti, undocumented op */
        case 0x39: /* fxchg7 sti, undocumented op */
            gen_helper_fxchg_ST0_STN(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x0a: /* grp d9/2 */
            switch (rm) {
            case 0: /* fnop */
                /*
                 * check exceptions (FreeBSD FPU probe)
                 * needs to be treated as I/O because of ferr_irq
                 */
                translator_io_start(&s->base);
                gen_helper_fwait(tcg_env);
                update_fip = false;
                break;
            default:
                return false;
            }
            break;
        case 0x0c: /* grp d9/4 */
            switch (rm) {
            case 0: /* fchs */
                gen_helper_fchs_ST0(tcg_env);
                break;
            case 1: /* fabs */
                gen_helper_fabs_ST0(tcg_env);
                break;
            case 4: /* ftst */
                gen_helper_fldz_FT0(tcg_env);
                gen_helper_fcom_ST0_FT0(tcg_env);
                break;
            case 5: /* fxam */
                gen_helper_fxam_ST0(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x0d: /* grp d9/5 */
            {
                switch (rm) {
                case 0:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fld1_ST0(tcg_env);
                    break;
                case 1:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldl2t_ST0(tcg_env);
                    break;
                case 2:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldl2e_ST0(tcg_env);
                    break;
                case 3:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldpi_ST0(tcg_env);
                    break;
                case 4:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldlg2_ST0(tcg_env);
                    break;
                case 5:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldln2_ST0(tcg_env);
                    break;
                case 6:
                    gen_helper_fpush(tcg_env);
                    gen_helper_fldz_ST0(tcg_env);
                    break;
                default:
                    return false;
                }
            }
            break;
        case 0x0e: /* grp d9/6 */
            switch (rm) {
            case 0:
                gen_helper_f2xm1(tcg_env);
                break;
            case 1:
                gen_helper_fyl2x(tcg_env);
                break;
            case 2:
                gen_helper_fptan(tcg_env);
                break;
            case 3: /* fpatan */
                gen_helper_fpatan(tcg_env);
                break;
            case 4: /* fxtract */
                gen_helper_fxtract(tcg_env);
                break;
            case 5: /* fprem1 */
                gen_helper_fprem1(tcg_env);
                break;
            case 6: /* fdecstp */
                gen_helper_fdecstp(tcg_env);
                break;
            default:
            case 7: /* fincstp */
                gen_helper_fincstp(tcg_env);
                break;
            }
            break;
        case 0x0f: /* grp d9/7 */
            switch (rm) {
            case 0:
                gen_helper_fprem(tcg_env);
                break;
            case 1: /* fyl2xp1 */
                gen_helper_fyl2xp1(tcg_env);
                break;
            case 2:
                gen_helper_fsqrt(tcg_env);
                break;
            case 3: /* fsincos */
                gen_helper_fsincos(tcg_env);
                break;
            case 5: /* fscale */
                gen_helper_fscale(tcg_env);
                break;
            case 4: /* frndint */
                gen_helper_frndint(tcg_env);
                break;
            case 6:
                gen_helper_fsin(tcg_env);
                break;
            default:
            case 7:
                gen_helper_fcos(tcg_env);
                break;
            }
            break;
        case 0x00: case 0x01: case 0x04 ... 0x07: /* fxxx st, sti */
        case 0x20: case 0x21: case 0x24 ... 0x27: /* fxxx sti, st */
        case 0x30: case 0x31: case 0x34 ... 0x37: /* fxxxp sti, st */
            {
                int op1;

                op1 = op & 7;
                if (op >= 0x20) {
                    gen_helper_fp_arith_STN_ST0(op1, opreg);
                    if (op >= 0x30) {
                        gen_helper_fpop(tcg_env);
                    }
                } else {
                    gen_helper_fmov_FT0_STN(tcg_env,
                                            tcg_constant_i32(opreg));
                    gen_helper_fp_arith_ST0_FT0(op1);
                }
            }
            break;
        case 0x02: /* fcom */
        case 0x22: /* fcom2, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcom_ST0_FT0(tcg_env);
            break;
        case 0x03: /* fcomp */
        case 0x23: /* fcomp3, undocumented op */
        case 0x32: /* fcomp5, undocumented op */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcom_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            break;
        case 0x15: /* da/5 */
            switch (rm) {
            case 1: /* fucompp */
                gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                gen_helper_fucom_ST0_FT0(tcg_env);
                gen_helper_fpop(tcg_env);
                gen_helper_fpop(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x1c: /* db/4 */
            switch (rm) {
            case 0: /* feni (287 only, just do nop here) */
                break;
            case 1: /* fdisi (287 only, just do nop here) */
                break;
            case 2: /* fclex */
                gen_helper_fclex(tcg_env);
                update_fip = false;
                break;
            case 3: /* fninit */
                gen_helper_fninit(tcg_env);
                update_fip = false;
                break;
            case 4: /* fsetpm (287 only, just do nop here) */
                break;
            default:
                return false;
            }
            break;
        case 0x1d: /* fucomi */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucomi_ST0_FT0(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x1e: /* fcomi */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcomi_ST0_FT0(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x28: /* ffree sti */
            gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x2a: /* fst sti */
            gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
            break;
        case 0x2b: /* fstp sti */
        case 0x0b: /* fstp1 sti, undocumented op */
        case 0x3a: /* fstp8 sti, undocumented op */
        case 0x3b: /* fstp9 sti, undocumented op */
            gen_helper_fmov_STN_ST0(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fpop(tcg_env);
            break;
        case 0x2c: /* fucom st(i) */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucom_ST0_FT0(tcg_env);
            break;
        case 0x2d: /* fucomp st(i) */
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucom_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            break;
        case 0x33: /* de/3 */
            switch (rm) {
            case 1: /* fcompp */
                gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(1));
                gen_helper_fcom_ST0_FT0(tcg_env);
                gen_helper_fpop(tcg_env);
                gen_helper_fpop(tcg_env);
                break;
            default:
                return false;
            }
            break;
        case 0x38: /* ffreep sti, undocumented op */
            gen_helper_ffree_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fpop(tcg_env);
            break;
        case 0x3c: /* df/4 */
            switch (rm) {
            case 0:
                gen_helper_fnstsw(s->tmp2_i32, tcg_env);
                tcg_gen_extu_i32_tl(s->T0, s->tmp2_i32);
                gen_op_mov_reg_v(s, MO_16, R_EAX, s->T0);
                break;
            default:
                return false;
            }
            break;
        case 0x3d: /* fucomip */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fucomi_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x3e: /* fcomip */
            if (!(s->cpuid_features & CPUID_CMOV)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_helper_fmov_FT0_STN(tcg_env, tcg_constant_i32(opreg));
            gen_helper_fcomi_ST0_FT0(tcg_env);
            gen_helper_fpop(tcg_env);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        case 0x10 ... 0x13: /* fcmovxx */
        case 0x18 ... 0x1b:
            {
                int op1;
                TCGLabel *l1;
                static const uint8_t fcmov_cc[8] = {
                    (JCC_B << 1),
                    (JCC_Z << 1),
                    (JCC_BE << 1),
                    (JCC_P << 1),
                };

                if (!(s->cpuid_features & CPUID_CMOV)) {
                    goto illegal_op;
                }
                op1 = fcmov_cc[op & 3] | (((op >> 3) & 1) ^ 1);
                l1 = gen_new_label();
                gen_jcc1_noeob(s, op1, l1);
                gen_helper_fmov_ST0_STN(tcg_env,
                                        tcg_constant_i32(opreg));
                gen_set_label(l1);
            }
            break;
        default:
            return false;
        }
    }

    if (update_fip) {
        tcg_gen_ld_i32(s->tmp2_i32, tcg_env,
                       offsetof(CPUX86State, segs[R_CS].selector));
        tcg_gen_st16_i32(s->tmp2_i32, tcg_env,
                         offsetof(CPUX86State, fpcs));
        tcg_gen_st_tl(eip_cur_tl(s),
                      tcg_env, offsetof(CPUX86State, fpip));
    }
    return true;

 illegal_op:
    gen_illegal_opcode(s);
    return true;
}
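/*
 * Legacy decoder for the opcodes that have not (yet) been converted to the
 * table-based decoder included below from decode-new.c.inc.
 */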
static void disas_insn_old(DisasContext *s, CPUState *cpu, int b)
{
    CPUX86State *env = cpu_env(cpu);
    int prefixes = s->prefix;
    MemOp dflag = s->dflag;
    MemOp ot;
    int modrm, reg, rm, mod, op, val;

    /* now check op code */
    switch (b) {
    case 0x1c7: /* cmpxchg8b */
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        switch ((modrm >> 3) & 7) {
        case 1: /* CMPXCHG8, CMPXCHG16 */
            if (mod == 3) {
                goto illegal_op;
            }
#ifdef TARGET_X86_64
            if (dflag == MO_64) {
                if (!(s->cpuid_ext_features & CPUID_EXT_CX16)) {
                    goto illegal_op;
                }
                gen_cmpxchg16b(s, env, modrm);
                break;
            }
#endif
            if (!(s->cpuid_features & CPUID_CX8)) {
                goto illegal_op;
            }
            gen_cmpxchg8b(s, env, modrm);
            break;

        case 7: /* RDSEED, RDPID with f3 prefix */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            if (s->prefix & PREFIX_REPZ) {
                if (!(s->cpuid_7_0_ecx_features & CPUID_7_0_ECX_RDPID)) {
                    goto illegal_op;
                }
                gen_helper_rdpid(s->T0, tcg_env);
                rm = (modrm & 7) | REX_B(s);
                gen_op_mov_reg_v(s, dflag, rm, s->T0);
                break;
            } else {
                if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_RDSEED)) {
                    goto illegal_op;
                }
                goto do_rdrand;
            }

        case 6: /* RDRAND */
            if (mod != 3 ||
                (s->prefix & (PREFIX_LOCK | PREFIX_REPZ | PREFIX_REPNZ)) ||
                !(s->cpuid_ext_features & CPUID_EXT_RDRAND)) {
                goto illegal_op;
            }
        do_rdrand:
            translator_io_start(&s->base);
            gen_helper_rdrand(s->T0, tcg_env);
            rm = (modrm & 7) | REX_B(s);
            gen_op_mov_reg_v(s, dflag, rm, s->T0);
            assume_cc_op(s, CC_OP_EFLAGS);
            break;

        default:
            goto illegal_op;
        }
        break;

        /************************/
        /* bit operations */
    case 0x1ba: /* bt/bts/btr/btc Gv, im */
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        op = (modrm >> 3) & 7;
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        if (mod != 3) {
            s->rip_offset = 1;
            gen_lea_modrm(env, s, modrm);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
        /* load shift */
        val = x86_ldub_code(env, s);
        tcg_gen_movi_tl(s->T1, val);
        if (op < 4)
            goto unknown_op;
        op -= 4;
        goto bt_op;
    case 0x1a3: /* bt Gv, Ev */
        op = 0;
        goto do_btx;
    case 0x1ab: /* bts */
        op = 1;
        goto do_btx;
    case 0x1b3: /* btr */
        op = 2;
        goto do_btx;
    case 0x1bb: /* btc */
        op = 3;
    do_btx:
        ot = dflag;
        modrm = x86_ldub_code(env, s);
        reg = ((modrm >> 3) & 7) | REX_R(s);
        mod = (modrm >> 6) & 3;
        rm = (modrm & 7) | REX_B(s);
        gen_op_mov_v_reg(s, MO_32, s->T1, reg);
        if (mod != 3) {
            AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
            /* specific case: we need to add a displacement */
            gen_exts(ot, s->T1);
            tcg_gen_sari_tl(s->tmp0, s->T1, 3 + ot);
            tcg_gen_shli_tl(s->tmp0, s->tmp0, ot);
            tcg_gen_add_tl(s->A0, gen_lea_modrm_1(s, a, false), s->tmp0);
            gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
            if (!(s->prefix & PREFIX_LOCK)) {
                gen_op_ld_v(s, ot, s->T0, s->A0);
            }
        } else {
            gen_op_mov_v_reg(s, ot, s->T0, rm);
        }
    bt_op:
        tcg_gen_andi_tl(s->T1, s->T1, (1 << (3 + ot)) - 1);
        tcg_gen_movi_tl(s->tmp0, 1);
        tcg_gen_shl_tl(s->tmp0, s->tmp0, s->T1);
        if (s->prefix & PREFIX_LOCK) {
            switch (op) {
            case 0: /* bt */
                /* Needs no atomic ops; we suppressed the normal
                   memory load for LOCK above so do it now. */
                gen_op_ld_v(s, ot, s->T0, s->A0);
                break;
            case 1: /* bts */
                tcg_gen_atomic_fetch_or_tl(s->T0, s->A0, s->tmp0,
                                           s->mem_index, ot | MO_LE);
                break;
            case 2: /* btr */
                tcg_gen_not_tl(s->tmp0, s->tmp0);
                tcg_gen_atomic_fetch_and_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            default:
            case 3: /* btc */
                tcg_gen_atomic_fetch_xor_tl(s->T0, s->A0, s->tmp0,
                                            s->mem_index, ot | MO_LE);
                break;
            }
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
        } else {
            tcg_gen_shr_tl(s->tmp4, s->T0, s->T1);
            switch (op) {
            case 0: /* bt */
                /* Data already loaded; nothing to do. */
                break;
            case 1: /* bts */
                tcg_gen_or_tl(s->T0, s->T0, s->tmp0);
                break;
            case 2: /* btr */
                tcg_gen_andc_tl(s->T0, s->T0, s->tmp0);
                break;
            default:
            case 3: /* btc */
                tcg_gen_xor_tl(s->T0, s->T0, s->tmp0);
                break;
            }
            if (op != 0) {
                if (mod != 3) {
                    gen_op_st_v(s, ot, s->T0, s->A0);
                } else {
                    gen_op_mov_reg_v(s, ot, rm, s->T0);
                }
            }
        }

        /* Delay all CC updates until after the store above.  Note that
           C is the result of the test, Z is unchanged, and the others
           are all undefined. */
        switch (s->cc_op) {
        case CC_OP_MULB ... CC_OP_MULQ:
        case CC_OP_ADDB ... CC_OP_ADDQ:
        case CC_OP_ADCB ... CC_OP_ADCQ:
        case CC_OP_SUBB ... CC_OP_SUBQ:
        case CC_OP_SBBB ... CC_OP_SBBQ:
        case CC_OP_LOGICB ... CC_OP_LOGICQ:
        case CC_OP_INCB ... CC_OP_INCQ:
        case CC_OP_DECB ... CC_OP_DECQ:
        case CC_OP_SHLB ... CC_OP_SHLQ:
        case CC_OP_SARB ... CC_OP_SARQ:
        case CC_OP_BMILGB ... CC_OP_BMILGQ:
            /* Z was going to be computed from the non-zero status of CC_DST.
               We can get that same Z value (and the new C value) by leaving
               CC_DST alone, setting CC_SRC, and using a CC_OP_SAR of the
               same width. */
            tcg_gen_mov_tl(cpu_cc_src, s->tmp4);
            set_cc_op(s, ((s->cc_op - CC_OP_MULB) & 3) + CC_OP_SARB);
            break;
        default:
            /* Otherwise, generate EFLAGS and replace the C bit. */
            gen_compute_eflags(s);
            tcg_gen_deposit_tl(cpu_cc_src, cpu_cc_src, s->tmp4,
                               ctz32(CC_C), 1);
            break;
        }
        break;

    case 0x100:
        modrm = x86_ldub_code(env, s);
        mod = (modrm >> 6) & 3;
        op = (modrm >> 3) & 7;
        switch (op) {
        case 0: /* sldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_LDTR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, ldt.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_st_modrm(env, s, modrm, ot);
            break;
        case 2: /* lldt */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_LDTR_WRITE);
                gen_ld_modrm(env, s, modrm, MO_16);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_lldt(tcg_env, s->tmp2_i32);
            }
            break;
        case 1: /* str */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_TR_READ);
            tcg_gen_ld32u_tl(s->T0, tcg_env,
                             offsetof(CPUX86State, tr.selector));
            ot = mod == 3 ? dflag : MO_16;
            gen_st_modrm(env, s, modrm, ot);
            break;
        case 3: /* ltr */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            if (check_cpl0(s)) {
                gen_svm_check_intercept(s, SVM_EXIT_TR_WRITE);
                gen_ld_modrm(env, s, modrm, MO_16);
                tcg_gen_trunc_tl_i32(s->tmp2_i32, s->T0);
                gen_helper_ltr(tcg_env, s->tmp2_i32);
            }
            break;
        case 4: /* verr */
        case 5: /* verw */
            if (!PE(s) || VM86(s))
                goto illegal_op;
            gen_ld_modrm(env, s, modrm, MO_16);
            gen_update_cc_op(s);
            if (op == 4) {
                gen_helper_verr(tcg_env, s->T0);
            } else {
                gen_helper_verw(tcg_env, s->T0);
            }
            assume_cc_op(s, CC_OP_EFLAGS);
            break;
        default:
            goto unknown_op;
        }
        break;

    case 0x101:
        modrm = x86_ldub_code(env, s);
        switch (modrm) {
        CASE_MODRM_MEM_OP(0): /* sgdt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0,
                             tcg_env, offsetof(CPUX86State, gdt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             * all 32-bits are written regardless of operand size.
             */
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xc8: /* monitor */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_lea_v_seg(s, cpu_regs[R_EAX], R_DS, s->override);
            gen_helper_monitor(tcg_env, s->A0);
            break;

        case 0xc9: /* mwait */
            if (!(s->cpuid_ext_features & CPUID_EXT_MONITOR) || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_mwait(tcg_env, cur_insn_len_i32(s));
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xca: /* clac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_reset_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xcb: /* stac */
            if (!(s->cpuid_7_0_ebx_features & CPUID_7_0_EBX_SMAP)
                || CPL(s) != 0) {
                goto illegal_op;
            }
            gen_set_eflags(s, AC_MASK);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(1): /* sidt */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_READ);
            gen_lea_modrm(env, s, modrm);
            tcg_gen_ld32u_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.limit));
            gen_op_st_v(s, MO_16, s->T0, s->A0);
            gen_add_A0_im(s, 2);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            /*
             * NB: Despite a confusing description in Intel CPU documentation,
             * all 32-bits are written regardless of operand size.
             */
            gen_op_st_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            break;

        case 0xd0: /* xgetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xgetbv(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;

        case 0xd1: /* xsetbv */
            if ((s->cpuid_ext_features & CPUID_EXT_XSAVE) == 0
                || (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                                 | PREFIX_REPZ | PREFIX_REPNZ))) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_XSETBV);
            if (!check_cpl0(s)) {
                break;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_xsetbv(tcg_env, s->tmp2_i32, s->tmp1_i64);
            /* End TB because translation flags may change. */
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xd8: /* VMRUN */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            /*
             * Reloads INHIBIT_IRQ mask as well as TF and RF with guest state.
             * The usual gen_eob() handling is performed on vmexit after
             * host state is reloaded.
             */
            gen_helper_vmrun(tcg_env, tcg_constant_i32(s->aflag - 1),
                             cur_insn_len_i32(s));
            tcg_gen_exit_tb(NULL, 0);
            s->base.is_jmp = DISAS_NORETURN;
            break;

        case 0xd9: /* VMMCALL */
            if (!SVME(s)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmmcall(tcg_env);
            break;

        case 0xda: /* VMLOAD */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmload(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdb: /* VMSAVE */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_vmsave(tcg_env, tcg_constant_i32(s->aflag - 1));
            break;

        case 0xdc: /* STGI */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_helper_stgi(tcg_env);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xdd: /* CLGI */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            gen_helper_clgi(tcg_env);
            break;

        case 0xde: /* SKINIT */
            if ((!SVME(s) && !(s->cpuid_ext3_features & CPUID_EXT3_SKINIT))
                || !PE(s)) {
                goto illegal_op;
            }
            gen_svm_check_intercept(s, SVM_EXIT_SKINIT);
            /* If not intercepted, not implemented -- raise #UD. */
            goto illegal_op;

        case 0xdf: /* INVLPGA */
            if (!SVME(s) || !PE(s)) {
                goto illegal_op;
            }
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPGA);
            if (s->aflag == MO_64) {
                tcg_gen_mov_tl(s->A0, cpu_regs[R_EAX]);
            } else {
                tcg_gen_ext32u_tl(s->A0, cpu_regs[R_EAX]);
            }
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(2): /* lgdt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_GDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, gdt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, gdt.limit));
            break;

        CASE_MODRM_MEM_OP(3): /* lidt */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_IDTR_WRITE);
            gen_lea_modrm(env, s, modrm);
            gen_op_ld_v(s, MO_16, s->T1, s->A0);
            gen_add_A0_im(s, 2);
            gen_op_ld_v(s, CODE64(s) + MO_32, s->T0, s->A0);
            if (dflag == MO_16) {
                tcg_gen_andi_tl(s->T0, s->T0, 0xffffff);
            }
            tcg_gen_st_tl(s->T0, tcg_env, offsetof(CPUX86State, idt.base));
            tcg_gen_st32_tl(s->T1, tcg_env, offsetof(CPUX86State, idt.limit));
            break;

        CASE_MODRM_OP(4): /* smsw */
            if (s->flags & HF_UMIP_MASK && !check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_READ_CR0);
            tcg_gen_ld_tl(s->T0, tcg_env, offsetof(CPUX86State, cr[0]));
            /*
             * In 32-bit mode, the higher 16 bits of the destination
             * register are undefined.  In practice CR0[31:0] is stored
             * just like in 64-bit mode.
             */
            mod = (modrm >> 6) & 3;
            ot = (mod != 3 ? MO_16 : s->dflag);
            gen_st_modrm(env, s, modrm, ot);
            break;
        case 0xee: /* rdpkru */
            if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                             | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_rdpkru(s->tmp1_i64, tcg_env, s->tmp2_i32);
            tcg_gen_extr_i64_tl(cpu_regs[R_EAX], cpu_regs[R_EDX], s->tmp1_i64);
            break;
        case 0xef: /* wrpkru */
            if (s->prefix & (PREFIX_LOCK | PREFIX_DATA
                             | PREFIX_REPZ | PREFIX_REPNZ)) {
                goto illegal_op;
            }
            tcg_gen_concat_tl_i64(s->tmp1_i64, cpu_regs[R_EAX],
                                  cpu_regs[R_EDX]);
            tcg_gen_trunc_tl_i32(s->tmp2_i32, cpu_regs[R_ECX]);
            gen_helper_wrpkru(tcg_env, s->tmp2_i32, s->tmp1_i64);
            break;

        CASE_MODRM_OP(6): /* lmsw */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_WRITE_CR0);
            gen_ld_modrm(env, s, modrm, MO_16);
            /*
             * Only the 4 lower bits of CR0 are modified.
             * PE cannot be set to zero if already set to one.
             */
            tcg_gen_ld_tl(s->T1, tcg_env, offsetof(CPUX86State, cr[0]));
            tcg_gen_andi_tl(s->T0, s->T0, 0xf);
            tcg_gen_andi_tl(s->T1, s->T1, ~0xe);
            tcg_gen_or_tl(s->T0, s->T0, s->T1);
            gen_helper_write_crN(tcg_env, tcg_constant_i32(0), s->T0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        CASE_MODRM_MEM_OP(7): /* invlpg */
            if (!check_cpl0(s)) {
                break;
            }
            gen_svm_check_intercept(s, SVM_EXIT_INVLPG);
            gen_lea_modrm(env, s, modrm);
            gen_helper_flush_page(tcg_env, s->A0);
            s->base.is_jmp = DISAS_EOB_NEXT;
            break;

        case 0xf8: /* swapgs */
#ifdef TARGET_X86_64
            if (CODE64(s)) {
                if (check_cpl0(s)) {
                    tcg_gen_mov_tl(s->T0, cpu_seg_base[R_GS]);
                    tcg_gen_ld_tl(cpu_seg_base[R_GS], tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                    tcg_gen_st_tl(s->T0, tcg_env,
                                  offsetof(CPUX86State, kernelgsbase));
                }
                break;
            }
#endif
            goto illegal_op;

        case 0xf9: /* rdtscp */
            if (!(s->cpuid_ext2_features & CPUID_EXT2_RDTSCP)) {
                goto illegal_op;
            }
            gen_update_cc_op(s);
            gen_update_eip_cur(s);
            translator_io_start(&s->base);
            gen_helper_rdtsc(tcg_env);
            gen_helper_rdpid(s->T0, tcg_env);
            gen_op_mov_reg_v(s, dflag, R_ECX, s->T0);
            break;

        default:
            goto unknown_op;
        }
        break;

    case 0x11a:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (prefixes & PREFIX_REPZ) {
                /* bndcl */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_LTU, cpu_bndl[reg]);
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcu */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                TCGv_i64 notu = tcg_temp_new_i64();
                tcg_gen_not_i64(notu, cpu_bndu[reg]);
                gen_bndck(env, s, modrm, TCG_COND_GTU, notu);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- from reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg], cpu_bndl[reg2]);
                        tcg_gen_mov_i64(cpu_bndu[reg], cpu_bndu[reg2]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_ld_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_ld_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                    /* bnd registers are now in-use */
                    gen_set_hflag(s, HF_MPX_IU_MASK);
                }
            } else if (mod != 3) {
                /* bndldx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndldx64(cpu_bndl[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ld_i64(cpu_bndu[reg], tcg_env,
                                   offsetof(CPUX86State, mmx_t0.MMX_Q(0)));
                } else {
                    gen_helper_bndldx32(cpu_bndu[reg], tcg_env, s->A0, s->T0);
                    tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndu[reg]);
                    tcg_gen_shri_i64(cpu_bndu[reg], cpu_bndu[reg], 32);
                }
                gen_set_hflag(s, HF_MPX_IU_MASK);
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    case 0x11b:
        modrm = x86_ldub_code(env, s);
        if (s->flags & HF_MPX_EN_MASK) {
            mod = (modrm >> 6) & 3;
            reg = ((modrm >> 3) & 7) | REX_R(s);
            if (mod != 3 && (prefixes & PREFIX_REPZ)) {
                /* bndmk */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
                if (a.base >= 0) {
                    tcg_gen_extu_tl_i64(cpu_bndl[reg], cpu_regs[a.base]);
                    if (!CODE64(s)) {
                        tcg_gen_ext32u_i64(cpu_bndl[reg], cpu_bndl[reg]);
                    }
                } else if (a.base == -1) {
                    /* no base register has lower bound of 0 */
                    tcg_gen_movi_i64(cpu_bndl[reg], 0);
                } else {
                    /* rip-relative generates #ud */
                    goto illegal_op;
                }
                tcg_gen_not_tl(s->A0, gen_lea_modrm_1(s, a, false));
                if (!CODE64(s)) {
                    tcg_gen_ext32u_tl(s->A0, s->A0);
                }
                tcg_gen_extu_tl_i64(cpu_bndu[reg], s->A0);
                /* bnd registers are now in-use */
                gen_set_hflag(s, HF_MPX_IU_MASK);
                break;
            } else if (prefixes & PREFIX_REPNZ) {
                /* bndcn */
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16) {
                    goto illegal_op;
                }
                gen_bndck(env, s, modrm, TCG_COND_GTU, cpu_bndu[reg]);
            } else if (prefixes & PREFIX_DATA) {
                /* bndmov -- to reg/mem */
                if (reg >= 4 || s->aflag == MO_16) {
                    goto illegal_op;
                }
                if (mod == 3) {
                    int reg2 = (modrm & 7) | REX_B(s);
                    if (reg2 >= 4 || (prefixes & PREFIX_LOCK)) {
                        goto illegal_op;
                    }
                    if (s->flags & HF_MPX_IU_MASK) {
                        tcg_gen_mov_i64(cpu_bndl[reg2], cpu_bndl[reg]);
                        tcg_gen_mov_i64(cpu_bndu[reg2], cpu_bndu[reg]);
                    }
                } else {
                    gen_lea_modrm(env, s, modrm);
                    if (CODE64(s)) {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                        tcg_gen_addi_tl(s->A0, s->A0, 8);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUQ);
                    } else {
                        tcg_gen_qemu_st_i64(cpu_bndl[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                        tcg_gen_addi_tl(s->A0, s->A0, 4);
                        tcg_gen_qemu_st_i64(cpu_bndu[reg], s->A0,
                                            s->mem_index, MO_LEUL);
                    }
                }
            } else if (mod != 3) {
                /* bndstx */
                AddressParts a = gen_lea_modrm_0(env, s, modrm, false);
                if (reg >= 4
                    || (prefixes & PREFIX_LOCK)
                    || s->aflag == MO_16
                    || a.base < -1) {
                    goto illegal_op;
                }
                if (a.base >= 0) {
                    tcg_gen_addi_tl(s->A0, cpu_regs[a.base], a.disp);
                } else {
                    tcg_gen_movi_tl(s->A0, 0);
                }
                gen_lea_v_seg(s, s->A0, a.def_seg, s->override);
                if (a.index >= 0) {
                    tcg_gen_mov_tl(s->T0, cpu_regs[a.index]);
                } else {
                    tcg_gen_movi_tl(s->T0, 0);
                }
                if (CODE64(s)) {
                    gen_helper_bndstx64(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                } else {
                    gen_helper_bndstx32(tcg_env, s->A0, s->T0,
                                        cpu_bndl[reg], cpu_bndu[reg]);
                }
            }
        }
        gen_nop_modrm(env, s, modrm);
        break;
    default:
        g_assert_not_reached();
    }
    return;
 illegal_op:
    gen_illegal_opcode(s);
    return;
 unknown_op:
    gen_unknown_opcode(env, s);
}
3794 #include "decode-new.h"
3795 #include "emit.c.inc"
3796 #include "decode-new.c.inc"
void tcg_x86_init(void)
{
    static const char reg_names[CPU_NB_REGS][4] = {
#ifdef TARGET_X86_64
    };
    static const char eip_name[] = {
#ifdef TARGET_X86_64
    };
    static const char seg_base_names[6][8] = {
    };
    static const char bnd_regl_names[4][8] = {
        "bnd0_lb", "bnd1_lb", "bnd2_lb", "bnd3_lb"
    };
    static const char bnd_regu_names[4][8] = {
        "bnd0_ub", "bnd1_ub", "bnd2_ub", "bnd3_ub"
    };
    int i;

    cpu_cc_op = tcg_global_mem_new_i32(tcg_env,
                                       offsetof(CPUX86State, cc_op), "cc_op");
    cpu_cc_dst = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_dst),
                                    "cc_dst");
    cpu_cc_src = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src),
                                    "cc_src");
    cpu_cc_src2 = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, cc_src2),
                                     "cc_src2");
    cpu_eip = tcg_global_mem_new(tcg_env, offsetof(CPUX86State, eip), eip_name);

    for (i = 0; i < CPU_NB_REGS; ++i) {
        cpu_regs[i] = tcg_global_mem_new(tcg_env,
                                         offsetof(CPUX86State, regs[i]),
                                         reg_names[i]);
    }

    for (i = 0; i < 6; ++i) {
        cpu_seg_base[i]
            = tcg_global_mem_new(tcg_env,
                                 offsetof(CPUX86State, segs[i].base),
                                 seg_base_names[i]);
    }

    for (i = 0; i < 4; ++i) {
        cpu_bndl[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].lb),
                                     bnd_regl_names[i]);
        cpu_bndu[i]
            = tcg_global_mem_new_i64(tcg_env,
                                     offsetof(CPUX86State, bnd_regs[i].ub),
                                     bnd_regu_names[i]);
    }
}
static void i386_tr_init_disas_context(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUX86State *env = cpu_env(cpu);
    uint32_t flags = dc->base.tb->flags;
    uint32_t cflags = tb_cflags(dc->base.tb);
    int cpl = (flags >> HF_CPL_SHIFT) & 3;
    int iopl = (flags >> IOPL_SHIFT) & 3;

    dc->cs_base = dc->base.tb->cs_base;
    dc->pc_save = dc->base.pc_next;
    dc->flags = flags;
#ifndef CONFIG_USER_ONLY
    dc->cpl = cpl;
    dc->iopl = iopl;
#endif

    /* We make some simplifying assumptions; validate they're correct. */
    g_assert(PE(dc) == ((flags & HF_PE_MASK) != 0));
    g_assert(CPL(dc) == cpl);
    g_assert(IOPL(dc) == iopl);
    g_assert(VM86(dc) == ((flags & HF_VM_MASK) != 0));
    g_assert(CODE32(dc) == ((flags & HF_CS32_MASK) != 0));
    g_assert(CODE64(dc) == ((flags & HF_CS64_MASK) != 0));
    g_assert(SS32(dc) == ((flags & HF_SS32_MASK) != 0));
    g_assert(LMA(dc) == ((flags & HF_LMA_MASK) != 0));
    g_assert(ADDSEG(dc) == ((flags & HF_ADDSEG_MASK) != 0));
    g_assert(SVME(dc) == ((flags & HF_SVME_MASK) != 0));
    g_assert(GUEST(dc) == ((flags & HF_GUEST_MASK) != 0));

    dc->cc_op = CC_OP_DYNAMIC;
    dc->cc_op_dirty = false;
    /* select memory access functions */
    dc->mem_index = cpu_mmu_index(cpu, false);
    dc->cpuid_features = env->features[FEAT_1_EDX];
    dc->cpuid_ext_features = env->features[FEAT_1_ECX];
    dc->cpuid_ext2_features = env->features[FEAT_8000_0001_EDX];
    dc->cpuid_ext3_features = env->features[FEAT_8000_0001_ECX];
    dc->cpuid_7_0_ebx_features = env->features[FEAT_7_0_EBX];
    dc->cpuid_7_0_ecx_features = env->features[FEAT_7_0_ECX];
    dc->cpuid_7_1_eax_features = env->features[FEAT_7_1_EAX];
    dc->cpuid_xsave_features = env->features[FEAT_XSAVE];
    dc->jmp_opt = !((cflags & CF_NO_GOTO_TB) ||
                    (flags & (HF_RF_MASK | HF_TF_MASK | HF_INHIBIT_IRQ_MASK)));
    /*
     * If jmp_opt, we want to handle each string instruction individually.
     * For icount also disable repz optimization so that each iteration
     * is accounted separately.
     *
     * FIXME: this is messy; it makes REP string instructions a lot less
     * efficient than they should be and it gets in the way of correct
     * handling of RF (interrupts or traps arriving after any iteration
     * of a repeated string instruction but the last should set RF to 1).
     * Perhaps it would be more efficient if REP string instructions were
     * always at the beginning of the TB, or even their own TB?  That
     * would even allow accounting up to 64k iterations at once for icount.
     */
    dc->repz_opt = !dc->jmp_opt && !(cflags & CF_USE_ICOUNT);

    dc->T0 = tcg_temp_new();
    dc->T1 = tcg_temp_new();
    dc->A0 = tcg_temp_new();

    dc->tmp0 = tcg_temp_new();
    dc->tmp1_i64 = tcg_temp_new_i64();
    dc->tmp2_i32 = tcg_temp_new_i32();
    dc->tmp3_i32 = tcg_temp_new_i32();
    dc->tmp4 = tcg_temp_new();
    dc->cc_srcT = tcg_temp_new();
}
static void i386_tr_tb_start(DisasContextBase *db, CPUState *cpu)
{
}

static void i386_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    target_ulong pc_arg = dc->base.pc_next;

    dc->prev_insn_start = dc->base.insn_start;
    dc->prev_insn_end = tcg_last_op();
    if (tb_cflags(dcbase->tb) & CF_PCREL) {
        pc_arg &= ~TARGET_PAGE_MASK;
    }
    tcg_gen_insn_start(pc_arg, dc->cc_op);
}
static void i386_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    bool orig_cc_op_dirty = dc->cc_op_dirty;
    CCOp orig_cc_op = dc->cc_op;
    target_ulong orig_pc_save = dc->pc_save;

#ifdef TARGET_VSYSCALL_PAGE
    /*
     * Detect entry into the vsyscall page and invoke the syscall.
     */
    if ((dc->base.pc_next & TARGET_PAGE_MASK) == TARGET_VSYSCALL_PAGE) {
        gen_exception(dc, EXCP_VSYSCALL);
        dc->base.pc_next = dc->pc + 1;
        return;
    }
#endif

    switch (sigsetjmp(dc->jmpbuf, 0)) {
    case 0:
        disas_insn(dc, cpu);
        break;
    case 1:
        gen_exception_gpf(dc);
        break;
    case 2:
        /* Restore state that may affect the next instruction. */
        dc->pc = dc->base.pc_next;
        /*
         * TODO: These save/restore can be removed after the table-based
         * decoder is complete; we will be decoding the insn completely
         * before any code generation that might affect these variables.
         */
        dc->cc_op_dirty = orig_cc_op_dirty;
        dc->cc_op = orig_cc_op;
        dc->pc_save = orig_pc_save;
        /* END TODO */
        dc->base.num_insns--;
        tcg_remove_ops_after(dc->prev_insn_end);
        dc->base.insn_start = dc->prev_insn_start;
        dc->base.is_jmp = DISAS_TOO_MANY;
        return;
    default:
        g_assert_not_reached();
    }

    /*
     * Instruction decoding completed (possibly with #GP if the
     * 15-byte boundary was exceeded).
     */
    dc->base.pc_next = dc->pc;
    if (dc->base.is_jmp == DISAS_NEXT) {
        if (dc->flags & (HF_TF_MASK | HF_INHIBIT_IRQ_MASK)) {
            /*
             * If single step mode, we generate only one instruction and
             * generate an exception.
             * If irq were inhibited with HF_INHIBIT_IRQ_MASK, we clear
             * the flag and abort the translation to give the irqs a
             * chance to happen.
             */
            dc->base.is_jmp = DISAS_EOB_NEXT;
        } else if (!is_same_page(&dc->base, dc->base.pc_next)) {
            dc->base.is_jmp = DISAS_TOO_MANY;
        }
    }
}
static void i386_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);

    switch (dc->base.is_jmp) {
    case DISAS_NORETURN:
        /*
         * Most instructions should not use DISAS_NORETURN, as that suppresses
         * the handling of hflags normally done by gen_eob().  We can get here:
         * - for exception and interrupts
         * - for jump optimization (which is disabled by INHIBIT_IRQ/RF/TF)
         * - for VMRUN because RF/TF handling for the host is done after vmexit,
         *   and INHIBIT_IRQ is loaded from the VMCB
         * - for HLT/PAUSE/MWAIT to exit the main loop with specific EXCP_* values;
         *   the helpers handle themselves the tasks normally done by gen_eob().
         */
        break;
    case DISAS_TOO_MANY:
        gen_update_cc_op(dc);
        gen_jmp_rel_csize(dc, 0, 0);
        break;
    case DISAS_EOB_NEXT:
    case DISAS_EOB_INHIBIT_IRQ:
        assert(dc->base.pc_next == dc->pc);
        gen_update_eip_cur(dc);
        /* fall through */
    case DISAS_EOB_ONLY:
    case DISAS_EOB_RECHECK_TF:
        gen_eob(dc, dc->base.is_jmp);
        break;
    default:
        g_assert_not_reached();
    }
}
static const TranslatorOps i386_tr_ops = {
    .init_disas_context = i386_tr_init_disas_context,
    .tb_start           = i386_tr_tb_start,
    .insn_start         = i386_tr_insn_start,
    .translate_insn     = i386_tr_translate_insn,
    .tb_stop            = i386_tr_tb_stop,
};
/* generate intermediate code for basic block 'tb'. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int *max_insns,
                           vaddr pc, void *host_pc)
{
    DisasContext dc;

    translator_loop(cpu, tb, max_insns, pc, host_pc, &i386_tr_ops, &dc.base);
}