/*---------------------------------------------------------------*/
/*--- begin                                   host_x86_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#ifndef __VEX_HOST_X86_DEFS_H
#define __VEX_HOST_X86_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"             // VexArch
#include "host_generic_regs.h"  // HReg
/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 8 real int regs,
   6 real float regs, and 8 real vector regs.
*/

#define ST_IN static inline
ST_IN HReg hregX86_EBX   ( void ) { return mkHReg(False, HRcInt32,  3,  0); }
ST_IN HReg hregX86_ESI   ( void ) { return mkHReg(False, HRcInt32,  6,  1); }
ST_IN HReg hregX86_EDI   ( void ) { return mkHReg(False, HRcInt32,  7,  2); }
ST_IN HReg hregX86_EAX   ( void ) { return mkHReg(False, HRcInt32,  0,  3); }
ST_IN HReg hregX86_ECX   ( void ) { return mkHReg(False, HRcInt32,  1,  4); }
ST_IN HReg hregX86_EDX   ( void ) { return mkHReg(False, HRcInt32,  2,  5); }

ST_IN HReg hregX86_FAKE0 ( void ) { return mkHReg(False, HRcFlt64,  0,  6); }
ST_IN HReg hregX86_FAKE1 ( void ) { return mkHReg(False, HRcFlt64,  1,  7); }
ST_IN HReg hregX86_FAKE2 ( void ) { return mkHReg(False, HRcFlt64,  2,  8); }
ST_IN HReg hregX86_FAKE3 ( void ) { return mkHReg(False, HRcFlt64,  3,  9); }
ST_IN HReg hregX86_FAKE4 ( void ) { return mkHReg(False, HRcFlt64,  4, 10); }
ST_IN HReg hregX86_FAKE5 ( void ) { return mkHReg(False, HRcFlt64,  5, 11); }

ST_IN HReg hregX86_XMM0  ( void ) { return mkHReg(False, HRcVec128, 0, 12); }
ST_IN HReg hregX86_XMM1  ( void ) { return mkHReg(False, HRcVec128, 1, 13); }
ST_IN HReg hregX86_XMM2  ( void ) { return mkHReg(False, HRcVec128, 2, 14); }
ST_IN HReg hregX86_XMM3  ( void ) { return mkHReg(False, HRcVec128, 3, 15); }
ST_IN HReg hregX86_XMM4  ( void ) { return mkHReg(False, HRcVec128, 4, 16); }
ST_IN HReg hregX86_XMM5  ( void ) { return mkHReg(False, HRcVec128, 5, 17); }
ST_IN HReg hregX86_XMM6  ( void ) { return mkHReg(False, HRcVec128, 6, 18); }
ST_IN HReg hregX86_XMM7  ( void ) { return mkHReg(False, HRcVec128, 7, 19); }

ST_IN HReg hregX86_ESP   ( void ) { return mkHReg(False, HRcInt32,  4, 20); }
ST_IN HReg hregX86_EBP   ( void ) { return mkHReg(False, HRcInt32,  5, 21); }
#undef ST_IN

extern UInt ppHRegX86 ( HReg );
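/* Illustration (not part of this interface): each getter above wraps
   mkHReg(virtual=False, regclass, encoding, index), i.e. these are the
   fixed "real" registers the backend works with.  A minimal sketch,
   assuming only the declarations above:

      HReg r = hregX86_EAX();   // real, class HRcInt32, encoding 0
      UInt n = ppHRegX86(r);    // prints the register name; the UInt
                                // presumably reports how much was printed
*/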
/* --------- Condition codes, Intel encoding. --------- */

typedef
   enum {
      Xcc_O      = 0,  /* overflow           */
      Xcc_NO     = 1,  /* no overflow        */

      Xcc_B      = 2,  /* below              */
      Xcc_NB     = 3,  /* not below          */

      Xcc_Z      = 4,  /* zero               */
      Xcc_NZ     = 5,  /* not zero           */

      Xcc_BE     = 6,  /* below or equal     */
      Xcc_NBE    = 7,  /* not below or equal */

      Xcc_S      = 8,  /* negative           */
      Xcc_NS     = 9,  /* not negative       */

      Xcc_P      = 10, /* parity even        */
      Xcc_NP     = 11, /* not parity even    */

      Xcc_L      = 12, /* jump less          */
      Xcc_NL     = 13, /* not less           */

      Xcc_LE     = 14, /* less or equal      */
      Xcc_NLE    = 15, /* not less or equal  */

      Xcc_ALWAYS = 16  /* the usual hack     */
   }
   X86CondCode;

extern const HChar* showX86CondCode ( X86CondCode );
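/* Illustration (not part of this interface): in the Intel encoding each
   condition sits next to its negation, so flipping the low bit negates
   a condition.  Assuming only the enum above:

      X86CondCode c    = Xcc_Z;
      X86CondCode notc = (X86CondCode)(c ^ 1);   // == Xcc_NZ
*/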
/* --------- Memory address expressions (amodes). --------- */

typedef
   enum {
      Xam_IR,   /* Immediate + Reg */
      Xam_IRRS  /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   X86AModeTag;

typedef
   struct {
      X86AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Xam;
   }
   X86AMode;

extern X86AMode* X86AMode_IR   ( UInt, HReg );
extern X86AMode* X86AMode_IRRS ( UInt, HReg, HReg, Int );

extern X86AMode* dopyX86AMode ( X86AMode* );

extern void ppX86AMode ( X86AMode* );
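/* Illustration (not part of this interface): building both amode forms
   with the constructors above.  The trailing comments show the x86
   addressing modes denoted.

      X86AMode* am1 = X86AMode_IR( 4, hregX86_EBP() );
                      // 4(%ebp)
      X86AMode* am2 = X86AMode_IRRS( 0, hregX86_EBX(),
                                     hregX86_ESI(), 2 );
                      // 0(%ebx,%esi,4) -- a shift of 2 scales by 4
*/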
/* --------- Operand, which can be reg, immediate or memory. --------- */

extern X86RMI* X86RMI_Imm ( UInt );
extern X86RMI* X86RMI_Reg ( HReg );
extern X86RMI* X86RMI_Mem ( X86AMode* );

extern void ppX86RMI ( X86RMI* );
/* --------- Operand, which can be reg or immediate only. --------- */

extern X86RI* X86RI_Imm ( UInt );
extern X86RI* X86RI_Reg ( HReg );

extern void ppX86RI ( X86RI* );
/* --------- Operand, which can be reg or memory only. --------- */

extern X86RM* X86RM_Reg ( HReg );
extern X86RM* X86RM_Mem ( X86AMode* );

extern void ppX86RM ( X86RM* );
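/* Illustration (not part of this interface): the three operand families
   above (RMI, RI, RM) let the instruction constructors accept register,
   immediate or memory operands uniformly.

      X86RMI* opA = X86RMI_Imm( 0x1000 );                       // imm
      X86RMI* opB = X86RMI_Reg( hregX86_ECX() );                // reg
      X86RM*  opC = X86RM_Mem( X86AMode_IR(8, hregX86_ESP()) ); // mem
*/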
/* --------- Instructions. --------- */

extern const HChar* showX86UnaryOp ( X86UnaryOp );
typedef
   enum {
      Xalu_ADD, Xalu_SUB, Xalu_ADC, Xalu_SBB,
      Xalu_AND, Xalu_OR, Xalu_XOR
   }
   X86AluOp;

extern const HChar* showX86AluOp ( X86AluOp );
typedef
   enum {
      Xsh_SHL, Xsh_SHR, Xsh_SAR
   }
   X86ShiftOp;

extern const HChar* showX86ShiftOp ( X86ShiftOp );
typedef
   enum {
      Xfp_ADD, Xfp_SUB, Xfp_MUL, Xfp_DIV,
      Xfp_SCALE, Xfp_ATAN, Xfp_YL2X, Xfp_YL2XP1, Xfp_PREM, Xfp_PREM1,
      Xfp_SQRT, Xfp_ABS, Xfp_NEG, Xfp_MOV, Xfp_SIN, Xfp_COS, Xfp_TAN
   }
   X86FpOp;

extern const HChar* showX86FpOp ( X86FpOp );
typedef
   enum {
      /* Floating point binary */
      Xsse_ADDF, Xsse_SUBF, Xsse_MULF, Xsse_DIVF,
      Xsse_MAXF, Xsse_MINF,
      Xsse_CMPEQF, Xsse_CMPLTF, Xsse_CMPLEF, Xsse_CMPUNF,
      /* Floating point unary */
      Xsse_RCPF, Xsse_RSQRTF, Xsse_SQRTF,
      Xsse_AND, Xsse_OR, Xsse_XOR, Xsse_ANDN,
      Xsse_ADD8, Xsse_ADD16, Xsse_ADD32, Xsse_ADD64,
      Xsse_QADD8U, Xsse_QADD16U,
      Xsse_QADD8S, Xsse_QADD16S,
      Xsse_SUB8, Xsse_SUB16, Xsse_SUB32, Xsse_SUB64,
      Xsse_QSUB8U, Xsse_QSUB16U,
      Xsse_QSUB8S, Xsse_QSUB16S,
      Xsse_AVG8U, Xsse_AVG16U,
      Xsse_CMPEQ8, Xsse_CMPEQ16, Xsse_CMPEQ32,
      Xsse_CMPGT8S, Xsse_CMPGT16S, Xsse_CMPGT32S,
      Xsse_SHL16, Xsse_SHL32, Xsse_SHL64,
      Xsse_SHR16, Xsse_SHR32, Xsse_SHR64,
      Xsse_SAR16, Xsse_SAR32,
      Xsse_PACKSSD, Xsse_PACKSSW, Xsse_PACKUSW,
      Xsse_UNPCKHB, Xsse_UNPCKHW, Xsse_UNPCKHD, Xsse_UNPCKHQ,
      Xsse_UNPCKLB, Xsse_UNPCKLW, Xsse_UNPCKLD, Xsse_UNPCKLQ
   }
   X86SseOp;

extern const HChar* showX86SseOp ( X86SseOp );
typedef
   enum {
      Xin_Alu32R,    /* 32-bit mov/arith/logical, dst=REG */
      Xin_Alu32M,    /* 32-bit mov/arith/logical, dst=MEM */
      Xin_Sh32,      /* 32-bit shift/rotate, dst=REG */
      Xin_Test32,    /* 32-bit test of REG or MEM against imm32 (AND, set
                        flags, discard result) */
      Xin_Unary32,   /* 32-bit not and neg */
      Xin_Lea32,     /* 32-bit compute EA into a reg */
      Xin_MulL,      /* 32 x 32 -> 64 multiply */
      Xin_Div,       /* 64/32 -> (32,32) div and mod */
      Xin_Sh3232,    /* shldl or shrdl */
      Xin_Push,      /* push (32-bit?) value on stack */
      Xin_Call,      /* call to address in register */
      Xin_XDirect,   /* direct transfer to GA */
      Xin_XIndir,    /* indirect transfer to GA */
      Xin_XAssisted, /* assisted transfer to GA */
      Xin_CMov32,    /* conditional move */
      Xin_LoadEX,    /* mov{s,z}{b,w}l from mem to reg */
      Xin_Store,     /* store 16/8 bit value in memory */
      Xin_Set32,     /* convert condition code to 32-bit value */
      Xin_Bsfr32,    /* 32-bit bsf/bsr */
      Xin_MFence,    /* mem fence (not just sse2, but sse0 and 1/mmxext too) */
      Xin_ACAS,      /* 8/16/32-bit lock;cmpxchg */
      Xin_DACAS,     /* lock;cmpxchg8b (doubleword ACAS, 2 x 32-bit only) */

      Xin_FpUnary,   /* FP fake unary op */
      Xin_FpBinary,  /* FP fake binary op */
      Xin_FpLdSt,    /* FP fake load/store */
      Xin_FpLdStI,   /* FP fake load/store, converting to/from Int */
      Xin_Fp64to32,  /* FP round IEEE754 double to IEEE754 single */
      Xin_FpCMov,    /* FP fake floating point conditional move */
      Xin_FpLdCW,    /* fldcw */
      Xin_FpStSW_AX, /* fstsw %ax */
      Xin_FpCmp,     /* FP compare, generating a C320 value into int reg */

      Xin_SseConst,  /* Generate restricted SSE literal */
      Xin_SseLdSt,   /* SSE load/store, no alignment constraints */
      Xin_SseLdzLO,  /* SSE load low 32/64 bits, zero remainder of reg */
      Xin_Sse32Fx4,  /* SSE binary, 32Fx4 */
      Xin_Sse32FLo,  /* SSE binary, 32F in lowest lane only */
      Xin_Sse64Fx2,  /* SSE binary, 64Fx2 */
      Xin_Sse64FLo,  /* SSE binary, 64F in lowest lane only */
      Xin_SseReRg,   /* SSE binary general reg-reg, Re, Rg */
      Xin_SseCMov,   /* SSE conditional move */
      Xin_SseShuf,   /* SSE2 shuffle (pshufd) */
      Xin_EvCheck,   /* Event check */
      Xin_ProfInc    /* 64-bit profile counter increment */
   }
   X86InstrTag;
/* Destinations are on the RIGHT (second operand) */

         UInt        src;  /* shift amount, or 0 means %cl */

         X86RM*      dst;  /* not written, only read */

      /* 32-bit compute EA into a reg */

      /* EDX:EAX = EAX *s/u r/m32 */

      /* x86 div/idiv instruction.  Modifies EDX and EAX and
         reads src. */

      /* shld/shrd.  op may only be Xsh_SHL or Xsh_SHR */
         UInt        amt;  /* shift amount, or 0 means %cl */

      /* Pseudo-insn.  Call target (an absolute address), on given
         condition (which could be Xcc_ALWAYS). */
         Int         regparms; /* 0 .. 3 */
         RetLoc      rloc;     /* where the return value will be */

      /* Update the guest EIP value, then exit requesting to chain
         to it.  May be conditional.  Urr, use of Addr32 implicitly
         assumes that wordsize(guest) == wordsize(host). */
         Addr32      dstGA;    /* next guest address */
         X86AMode*   amEIP;    /* amode in guest state for EIP */
         X86CondCode cond;     /* can be Xcc_ALWAYS */
         Bool        toFastEP; /* chain to the slow or fast point? */

      /* Boring transfer to a guest address not known at JIT time.
         Not chainable.  May be conditional. */
         X86CondCode cond;     /* can be Xcc_ALWAYS */

      /* Assisted transfer to a guest address, most general case.
         Not chainable.  May be conditional. */
         X86CondCode cond;     /* can be Xcc_ALWAYS */

      /* Mov src to dst on the given condition, which may not
         be the bogus Xcc_ALWAYS. */

      /* Sign/Zero extending loads.  Dst size is always 32 bits. */

      /* 16/8 bit stores, which are troublesome (particularly
         8-bit ones) */
         UChar       sz;   /* only 1 or 2 */

      /* Convert an x86 condition code to a 32-bit value (0 or 1). */

      /* 32-bit bsf or bsr. */

      /* Mem fence (not just sse2, but sse0 and sse1/mmxext too).
         In short, an insn which flushes all preceding loads and
         stores as much as possible before continuing.  On SSE2
         we emit a real "mfence", on SSE1 or the MMXEXT subset
         "sfence ; lock addl $0,0(%esp)" and on SSE0
         "lock addl $0,0(%esp)".  This insn therefore carries the
         host's hwcaps so the assembler knows what to emit. */
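      /* Illustration (hypothetical, for exposition only): the dispatch
         the comment above describes could be sketched as below; the
         VEX_HWCAPS_X86_* bits are the hwcaps flags from libvex.h.

            Bool sse2   = (hwcaps & VEX_HWCAPS_X86_SSE2) != 0;
            Bool mmxext = (hwcaps & (VEX_HWCAPS_X86_SSE1
                                     | VEX_HWCAPS_X86_MMXEXT)) != 0;
            // sse2:        emit "mfence"
            // else mmxext: emit "sfence ; lock addl $0,0(%esp)"
            // else:        emit "lock addl $0,0(%esp)"
      */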
522 /* "lock;cmpxchg": mem address in .addr,
523 expected value in %eax, new value in %ebx */
526 UChar sz
; /* 1, 2 or 4 */
528 /* "lock;cmpxchg8b": mem address in .addr, expected value in
529 %edx:%eax, new value in %ecx:%ebx */
534 /* X86 Floating point (fake 3-operand, "flat reg file" insns) */
548 UChar sz
; /* only 4 (IEEE single) or 8 (IEEE double) */
552 /* Move 64-bit float to/from memory, converting to/from
553 signed int on the way. Note the conversions will observe
554 the host FPU rounding mode currently in force. */
557 UChar sz
; /* only 2, 4 or 8 */
561 /* By observing the current FPU rounding mode, round (etc)
562 src into dst given that dst should be interpreted as an
563 IEEE754 32-bit (float) type. */
568 /* Mov src to dst on the given condition, which may not
569 be the bogus Xcc_ALWAYS. */
575 /* Load the FPU's 16-bit control word (fldcw) */
585 /* Do a compare, generating the C320 bits into the dst. */
592 /* Simplistic SSE[123] */
603 UChar sz
; /* 4 or 8 only */
632 /* Mov src to dst on the given condition, which may not
633 be the bogus Xcc_ALWAYS. */
640 Int order
; /* 0 <= order <= 0xFF */
646 X86AMode
* amFailAddr
;
649 /* No fields. The address of the counter to inc is
650 installed later, post-translation, by patching it in,
651 as it is not known at translation time. */
extern X86Instr* X86Instr_Alu32R    ( X86AluOp, X86RMI*, HReg );
extern X86Instr* X86Instr_Alu32M    ( X86AluOp, X86RI*, X86AMode* );
extern X86Instr* X86Instr_Unary32   ( X86UnaryOp op, HReg dst );
extern X86Instr* X86Instr_Lea32     ( X86AMode* am, HReg dst );

extern X86Instr* X86Instr_Sh32      ( X86ShiftOp, UInt, HReg );
extern X86Instr* X86Instr_Test32    ( UInt imm32, X86RM* dst );
extern X86Instr* X86Instr_MulL      ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Div       ( Bool syned, X86RM* );
extern X86Instr* X86Instr_Sh3232    ( X86ShiftOp, UInt amt, HReg src,
                                      HReg dst );
extern X86Instr* X86Instr_Push      ( X86RMI* );
extern X86Instr* X86Instr_Call      ( X86CondCode, Addr32, Int, RetLoc );
extern X86Instr* X86Instr_XDirect   ( Addr32 dstGA, X86AMode* amEIP,
                                      X86CondCode cond, Bool toFastEP );
extern X86Instr* X86Instr_XIndir    ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond );
extern X86Instr* X86Instr_XAssisted ( HReg dstGA, X86AMode* amEIP,
                                      X86CondCode cond, IRJumpKind jk );
extern X86Instr* X86Instr_CMov32    ( X86CondCode, X86RM* src, HReg dst );
extern X86Instr* X86Instr_LoadEX    ( UChar szSmall, Bool syned,
                                      X86AMode* src, HReg dst );
extern X86Instr* X86Instr_Store     ( UChar sz, HReg src, X86AMode* dst );
extern X86Instr* X86Instr_Set32     ( X86CondCode cond, HReg dst );
extern X86Instr* X86Instr_Bsfr32    ( Bool isFwds, HReg src, HReg dst );
extern X86Instr* X86Instr_MFence    ( UInt hwcaps );
extern X86Instr* X86Instr_ACAS      ( X86AMode* addr, UChar sz );
extern X86Instr* X86Instr_DACAS     ( X86AMode* addr );

extern X86Instr* X86Instr_FpUnary   ( X86FpOp op, HReg src, HReg dst );
extern X86Instr* X86Instr_FpBinary  ( X86FpOp op, HReg srcL, HReg srcR,
                                      HReg dst );
extern X86Instr* X86Instr_FpLdSt    ( Bool isLoad, UChar sz, HReg reg,
                                      X86AMode* );
extern X86Instr* X86Instr_FpLdStI   ( Bool isLoad, UChar sz, HReg reg,
                                      X86AMode* );
extern X86Instr* X86Instr_Fp64to32  ( HReg src, HReg dst );
extern X86Instr* X86Instr_FpCMov    ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_FpLdCW    ( X86AMode* );
extern X86Instr* X86Instr_FpStSW_AX ( void );
extern X86Instr* X86Instr_FpCmp     ( HReg srcL, HReg srcR, HReg dst );

extern X86Instr* X86Instr_SseConst  ( UShort con, HReg dst );
extern X86Instr* X86Instr_SseLdSt   ( Bool isLoad, HReg, X86AMode* );
extern X86Instr* X86Instr_SseLdzLO  ( Int sz, HReg, X86AMode* );
extern X86Instr* X86Instr_Sse32Fx4  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse32FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64Fx2  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_Sse64FLo  ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseReRg   ( X86SseOp, HReg, HReg );
extern X86Instr* X86Instr_SseCMov   ( X86CondCode, HReg src, HReg dst );
extern X86Instr* X86Instr_SseShuf   ( Int order, HReg src, HReg dst );
extern X86Instr* X86Instr_EvCheck   ( X86AMode* amCounter,
                                      X86AMode* amFailAddr );
extern X86Instr* X86Instr_ProfInc   ( void );
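/* Illustration (not part of this interface): composing the pieces
   above.  This builds "addl $1, %eax", then a move of %ecx into %edx
   taken only when the Z flag is clear.

      X86Instr* i1 = X86Instr_Alu32R( Xalu_ADD,
                                      X86RMI_Imm(1), hregX86_EAX() );
      X86Instr* i2 = X86Instr_CMov32( Xcc_NZ,
                                      X86RM_Reg(hregX86_ECX()),
                                      hregX86_EDX() );
*/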
extern void ppX86Instr ( const X86Instr*, Bool );
/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_X86Instr ( HRegUsage*, const X86Instr*, Bool );
extern void mapRegs_X86Instr     ( HRegRemap*, X86Instr*, Bool );
extern Int  emit_X86Instr ( /*MB_MOD*/Bool* is_profInc,
                            UChar* buf, Int nbuf, const X86Instr* i,
                            Bool mode64,
                            VexEndness endness_host,
                            const void* disp_cp_chain_me_to_slowEP,
                            const void* disp_cp_chain_me_to_fastEP,
                            const void* disp_cp_xindir,
                            const void* disp_cp_xassisted );
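/* Illustration (hypothetical, for exposition only): a driver might emit
   one insn at a time into a scratch buffer.  'vi' and the four disp_cp_*
   dispatcher addresses are assumed to come from the surrounding
   translation machinery; the Bool after 'vi' is taken to be mode64.

      UChar buf[64];
      Bool  isProfInc = False;
      Int   used = emit_X86Instr( &isProfInc, buf, sizeof(buf), vi,
                                  False,            // mode64
                                  VexEndnessLE,
                                  disp_slowEP, disp_fastEP,
                                  disp_xindir, disp_xassisted );
*/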
extern void genSpill_X86  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );
extern void genReload_X86 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                            HReg rreg, Int offset, Bool );
extern X86Instr* genMove_X86 ( HReg from, HReg to, Bool );
extern X86Instr* directReload_X86 ( X86Instr* i, HReg vreg, Short spill_off );

extern const RRegUniverse* getRRegUniverse_X86 ( void );
extern HInstrArray* iselSB_X86 ( const IRSB*,
                                 VexArch,
                                 const VexArchInfo*,
                                 const VexAbiInfo*,
                                 Int offs_Host_EvC_Counter,
                                 Int offs_Host_EvC_FailAddr,
                                 Bool chainingAllowed,
                                 Bool addProfInc,
                                 Addr max_ga );
/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_X86 (void);
/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_X86 ( VexEndness endness_host,
                                        void* place_to_chain,
                                        const void* disp_cp_chain_me_EXPECTED,
                                        const void* place_to_jump_to );
extern VexInvalRange unchainXDirect_X86 ( VexEndness endness_host,
                                          void* place_to_unchain,
                                          const void* place_to_jump_to_EXPECTED,
                                          const void* disp_cp_chain_me );
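/* Illustration (not part of this interface): chaining rewrites the code
   at place_to_chain, which must currently transfer to
   disp_cp_chain_me_EXPECTED, into a direct jump to place_to_jump_to;
   unchaining reverses that.  A hedged sketch of a caller, where 'place'
   and 'target' are hypothetical:

      VexInvalRange vir
         = chainXDirect_X86( VexEndnessLE, place,
                             disp_cp_chain_me, target );
      // vir describes the byte range whose icache must be invalidated
*/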
/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_X86 ( VexEndness endness_host,
                                        void* place_to_patch,
                                        const ULong* location_of_counter );
#endif /* ndef __VEX_HOST_X86_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                     host_x86_defs.h ---*/
/*---------------------------------------------------------------*/