/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.
   Copyright (C) 2004-2017 OpenWorks LLP

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
#ifndef __VEX_HOST_AMD64_DEFS_H
#define __VEX_HOST_AMD64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"                      // VexArch
#include "host_generic_regs.h"           // HReg
/* --------- Registers. --------- */

/* The usual HReg abstraction.  There are 16 real int regs, 6 real
   float regs, and 16 real vector regs.
*/

#define ST_IN static inline
ST_IN HReg hregAMD64_R12   ( void ) { return mkHReg(False, HRcInt64,   12,  0); }
ST_IN HReg hregAMD64_R13   ( void ) { return mkHReg(False, HRcInt64,   13,  1); }
ST_IN HReg hregAMD64_R14   ( void ) { return mkHReg(False, HRcInt64,   14,  2); }
ST_IN HReg hregAMD64_R15   ( void ) { return mkHReg(False, HRcInt64,   15,  3); }
ST_IN HReg hregAMD64_RBX   ( void ) { return mkHReg(False, HRcInt64,    3,  4); }
ST_IN HReg hregAMD64_RSI   ( void ) { return mkHReg(False, HRcInt64,    6,  5); }
ST_IN HReg hregAMD64_RDI   ( void ) { return mkHReg(False, HRcInt64,    7,  6); }
ST_IN HReg hregAMD64_R8    ( void ) { return mkHReg(False, HRcInt64,    8,  7); }
ST_IN HReg hregAMD64_R9    ( void ) { return mkHReg(False, HRcInt64,    9,  8); }
ST_IN HReg hregAMD64_R10   ( void ) { return mkHReg(False, HRcInt64,   10,  9); }

ST_IN HReg hregAMD64_XMM3  ( void ) { return mkHReg(False, HRcVec128,   3, 10); }
ST_IN HReg hregAMD64_XMM4  ( void ) { return mkHReg(False, HRcVec128,   4, 11); }
ST_IN HReg hregAMD64_XMM5  ( void ) { return mkHReg(False, HRcVec128,   5, 12); }
ST_IN HReg hregAMD64_XMM6  ( void ) { return mkHReg(False, HRcVec128,   6, 13); }
ST_IN HReg hregAMD64_XMM7  ( void ) { return mkHReg(False, HRcVec128,   7, 14); }
ST_IN HReg hregAMD64_XMM8  ( void ) { return mkHReg(False, HRcVec128,   8, 15); }
ST_IN HReg hregAMD64_XMM9  ( void ) { return mkHReg(False, HRcVec128,   9, 16); }
ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128,  10, 17); }
ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128,  11, 18); }
ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128,  12, 19); }

ST_IN HReg hregAMD64_RAX   ( void ) { return mkHReg(False, HRcInt64,    0, 20); }
ST_IN HReg hregAMD64_RCX   ( void ) { return mkHReg(False, HRcInt64,    1, 21); }
ST_IN HReg hregAMD64_RDX   ( void ) { return mkHReg(False, HRcInt64,    2, 22); }
ST_IN HReg hregAMD64_RSP   ( void ) { return mkHReg(False, HRcInt64,    4, 23); }
ST_IN HReg hregAMD64_RBP   ( void ) { return mkHReg(False, HRcInt64,    5, 24); }
ST_IN HReg hregAMD64_R11   ( void ) { return mkHReg(False, HRcInt64,   11, 25); }

ST_IN HReg hregAMD64_XMM0  ( void ) { return mkHReg(False, HRcVec128,   0, 26); }
ST_IN HReg hregAMD64_XMM1  ( void ) { return mkHReg(False, HRcVec128,   1, 27); }
extern UInt ppHRegAMD64 ( HReg );
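/* Illustrative sketch, not part of the interface: the ST_IN helpers
   above hand out fixed HReg values for the real registers, so backend
   code can name them directly, e.g.

      HReg r = hregAMD64_RAX();    -- integer class (HRcInt64)
      HReg v = hregAMD64_XMM0();   -- vector class (HRcVec128)
      UInt n = ppHRegAMD64(r);     -- prints the register name

   The variable names here are hypothetical, and the return value of
   ppHRegAMD64 is assumed (not stated here) to be the number of
   characters printed, as is usual for the pp* helpers. */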
/* --------- Condition codes, AMD encoding. --------- */
typedef
   enum {
      Acc_O      = 0,  /* overflow           */
      Acc_NO     = 1,  /* no overflow        */

      Acc_B      = 2,  /* below              */
      Acc_NB     = 3,  /* not below          */

      Acc_Z      = 4,  /* zero               */
      Acc_NZ     = 5,  /* not zero           */

      Acc_BE     = 6,  /* below or equal     */
      Acc_NBE    = 7,  /* not below or equal */

      Acc_S      = 8,  /* negative           */
      Acc_NS     = 9,  /* not negative       */

      Acc_P      = 10, /* parity even        */
      Acc_NP     = 11, /* not parity even    */

      Acc_L      = 12, /* jump less          */
      Acc_NL     = 13, /* not less           */

      Acc_LE     = 14, /* less or equal      */
      Acc_NLE    = 15, /* not less or equal  */

      Acc_ALWAYS = 16  /* the usual hack     */
   }
   AMD64CondCode;
extern const HChar* showAMD64CondCode ( AMD64CondCode );
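/* Illustrative note, not part of the interface: these values follow
   the hardware condition-code encoding, so any condition in the range
   Acc_O .. Acc_NLE can be negated by flipping its least significant
   bit, e.g. (with 'cond' a hypothetical variable)

      AMD64CondCode negated = cond ^ 1;   -- Acc_Z <-> Acc_NZ, etc.

   Acc_ALWAYS (16) is a software-only value and has no such inverse. */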
/* --------- Memory address expressions (amodes). --------- */
typedef
   enum {
     Aam_IR,        /* Immediate + Reg */
     Aam_IRRS       /* Immediate + Reg1 + (Reg2 << Shift) */
   }
   AMD64AModeTag;

typedef
   struct {
      AMD64AModeTag tag;
      union {
         struct {
            UInt imm;
            HReg reg;
         } IR;
         struct {
            UInt imm;
            HReg base;
            HReg index;
            Int  shift; /* 0, 1, 2 or 3 only */
         } IRRS;
      } Aam;
   }
   AMD64AMode;
extern AMD64AMode* AMD64AMode_IR   ( UInt, HReg );
extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );

extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );

extern void ppAMD64AMode ( AMD64AMode* );
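/* Example (illustrative only; 'am' is a hypothetical variable): the
   memory operand 16(%rbp) would be described as

      AMD64AMode* am = AMD64AMode_IR ( 16, hregAMD64_RBP() );

   and 8(%rdi,%rsi,4), using the Immediate + Reg1 + (Reg2 << Shift)
   form, as

      AMD64AMode* am = AMD64AMode_IRRS ( 8, hregAMD64_RDI(),
                                         hregAMD64_RSI(), 2 );
*/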
/* --------- Operand, which can be reg, immediate or memory. --------- */
extern AMD64RMI* AMD64RMI_Imm ( UInt );
extern AMD64RMI* AMD64RMI_Reg ( HReg );
extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );

extern void ppAMD64RMI      ( AMD64RMI* );
extern void ppAMD64RMI_lo32 ( AMD64RMI* );
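/* Example (illustrative only; variable names hypothetical): an RMI
   operand wraps one of the three forms, e.g.

      AMD64RMI* op1 = AMD64RMI_Imm ( 0x1000 );
      AMD64RMI* op2 = AMD64RMI_Reg ( hregAMD64_RCX() );
      AMD64RMI* op3 = AMD64RMI_Mem ( AMD64AMode_IR(0, hregAMD64_RSP()) );
*/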
/* --------- Operand, which can be reg or immediate only. --------- */
extern AMD64RI* AMD64RI_Imm ( UInt );
extern AMD64RI* AMD64RI_Reg ( HReg );

extern void ppAMD64RI ( AMD64RI* );
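/* Example (illustrative only; variable names hypothetical): a
   reg-or-immediate operand, as taken for instance by the Alu64M
   (dst=MEM) instruction declared further down:

      AMD64RI* src1 = AMD64RI_Imm ( 42 );
      AMD64RI* src2 = AMD64RI_Reg ( hregAMD64_RDX() );
*/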
/* --------- Operand, which can be reg or memory only. --------- */
extern AMD64RM* AMD64RM_Reg ( HReg );
extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );

extern void ppAMD64RM ( AMD64RM* );
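/* Example (illustrative only; variable names hypothetical): a
   reg-or-memory operand, as taken for instance by the widening
   multiply (AMD64Instr_MulL) declared further down:

      AMD64RM* srcR = AMD64RM_Reg ( hregAMD64_RSI() );
      AMD64RM* srcM = AMD64RM_Mem ( AMD64AMode_IR(8, hregAMD64_RSP()) );
*/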
/* --------- Instructions. --------- */
extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );
      Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
      Aalu_AND, Aalu_OR, Aalu_XOR,
extern const HChar* showAMD64AluOp ( AMD64AluOp );
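/* Example (illustrative only; see the AMD64Instr constructors further
   down): an ALU op is paired with its operands by an instruction
   constructor, so a 64-bit register-register add would be built
   roughly as

      AMD64Instr* i = AMD64Instr_Alu64R ( Aalu_ADD,
                                          AMD64RMI_Reg(hregAMD64_RSI()),
                                          hregAMD64_RAX() );

   with the destination on the right, as noted below. */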
      Ash_SHL, Ash_SHR, Ash_SAR
extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );
      Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
      Afp_SIN, Afp_COS, Afp_TAN,
extern const HChar* showA87FpOp ( A87FpOp );
      /* Floating point binary */
      Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
      Asse_MAXF, Asse_MINF,
      Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
      /* Floating point unary */
      Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
      /* Floating point conversion */
      Asse_I2F,  // i32-signed to float conversion, aka cvtdq2ps in vec form
      Asse_F2I,  // float to i32-signed conversion, aka cvtps2dq in vec form
      Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
      Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
      Asse_QADD8U, Asse_QADD16U,
      Asse_QADD8S, Asse_QADD16S,
      Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
      Asse_QSUB8U, Asse_QSUB16U,
      Asse_QSUB8S, Asse_QSUB16S,
      Asse_AVG8U, Asse_AVG16U,
      Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
      Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
      Asse_SHL16, Asse_SHL32, Asse_SHL64, Asse_SHL128,
      Asse_SHR16, Asse_SHR32, Asse_SHR64, Asse_SHR128,
      Asse_SAR16, Asse_SAR32,
      Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
      Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
      Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ,
      // Only for SSSE3 capable hosts:
      // Only for F16C capable hosts:
      Asse_F32toF16,  // F32 to F16 conversion, aka vcvtps2ph
      Asse_F16toF32,  // F16 to F32 conversion, aka vcvtph2ps
      // Only for FMA (FMA3) capable hosts:
      Asse_VFMADD213, // Fused Multiply-Add, aka vfmadd213ss
extern const HChar* showAMD64SseOp ( AMD64SseOp );
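/* Illustrative note (an assumption drawn from the comments below, not
   a statement of this interface): the same AMD64SseOp can act at
   different widths depending on the instruction form it is paired
   with, e.g. with hypothetical vector-class registers srcReg/dstReg

      AMD64Instr* packed = AMD64Instr_Sse32Fx4 ( Asse_ADDF, srcReg, dstReg );
      AMD64Instr* scalar = AMD64Instr_Sse32FLo ( Asse_ADDF, srcReg, dstReg );

   the first denotes a 32Fx4 (packed) add and the second the same op
   restricted to the lowest lane. */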
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift, dst=REG */
      Ain_Sh32,        /* 32-bit shift, dst=REG */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseCStore,   /* SSE conditional store, 128 bit only, any alignment */
      Ain_SseCLoad,    /* SSE conditional load, 128 bit only, any alignment */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      Ain_SseShiftN,   /* SSE2 shift by immediate */
      Ain_SseMOVQ,     /* SSE2 moves of xmm[63:0] to/from GPR */
      //uu Ain_AvxLdSt, /* AVX load/store 256 bits,
      //uu                 no alignment constraints */
      //uu Ain_AvxReRg, /* AVX binary general reg-reg, Re, Rg */
      Ain_Avx32FLo,    /* AVX binary 3 operand, 32F in lowest lane only */
      Ain_Avx64FLo,    /* AVX binary 3 operand, 64F in lowest lane only */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;
/* Destinations are on the RIGHT (second operand) */

         UInt src;  /* shift amount, or 0 means %cl */
         UInt src;  /* shift amount, or 0 means %cl */
      /* 64-bit compute EA into a reg */
      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
         r/m64 */
      /* amd64 div/idiv instruction.  Modifies RDX and RAX and
         reads src. */
         Int sz; /* 4 or 8 only */
      /* Pseudo-insn.  Call target (an absolute address), on given
         condition (which could be Acc_ALWAYS). */
         Int    regparms; /* 0 .. 6 */
         RetLoc rloc;     /* where the return value will be */
      /* Update the guest RIP value, then exit requesting to chain
         to it.  May be conditional. */
         Addr64        dstGA;    /* next guest address */
         AMD64AMode*   amRIP;    /* amode in guest state for RIP */
         AMD64CondCode cond;     /* can be Acc_ALWAYS */
         Bool          toFastEP; /* chain to the slow or fast point? */
      /* Boring transfer to a guest address not known at JIT time.
         Not chainable.  May be conditional. */
         AMD64CondCode cond; /* can be Acc_ALWAYS */
      /* Assisted transfer to a guest address, most general case.
         Not chainable.  May be conditional. */
         AMD64CondCode cond; /* can be Acc_ALWAYS */
      /* Mov src to dst on the given condition, which may not
         be the bogus Acc_ALWAYS. */
      /* conditional load to int reg, 32 bit ZX or 64 bit only.
         cond may not be Acc_ALWAYS. */
         UChar szB; /* 4 or 8 only */
      /* cond. store from int reg, 32 or 64 bit only.
         cond may not be Acc_ALWAYS. */
         UChar szB; /* 4 or 8 only */
      /* reg-reg move, sx-ing/zx-ing top half */
      /* Sign/Zero extending loads.  Dst size is always 64 bits. */
         UChar szSmall; /* only 1, 2 or 4 */
      /* 32/16/8 bit stores. */
         UChar sz; /* only 1, 2 or 4 */
      /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
      /* 64-bit bsf or bsr. */
      /* Mem fence.  In short, an insn which flushes all preceding
         loads and stores as much as possible before continuing.
         On AMD64 we emit a real "mfence". */
         UChar sz; /* 1, 2, 4 or 8 */
         UChar sz; /* 4 or 8 only */
      /* A very minimal set of x87 insns, that operate exactly in a
         stack-like way so no need to think about x87 registers. */
      /* Do 'ffree' on %st(7) .. %st(7-nregs) */
         Int nregs; /* 1 <= nregs <= 7 */
      /* Push a 32- or 64-bit FP value from memory onto the stack,
         or move a value from the stack to memory and remove it
         from the stack. */
         UChar szB; /* 4 or 8 */
      /* Do an operation on the top-of-stack.  This can be unary, in
         which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
         %st0, %st1 ). */
      /* Load the FPU control word. */
      /* Store the FPU status word (fstsw m16) */
      /* Load 32 bits into %mxcsr. */
      /* ucomisd/ucomiss, then get %rflags into int register */
         UChar sz; /* 4 or 8 only */
      /* scalar 32/64 int to 32/64 float conversion */
         UChar szS; /* 4 or 8 */
         UChar szD; /* 4 or 8 */
         HReg  src; /* i class */
         HReg  dst; /* v class */
      /* scalar 32/64 float to 32/64 int conversion */
         UChar szS; /* 4 or 8 */
         UChar szD; /* 4 or 8 */
         HReg  src; /* v class */
         HReg  dst; /* i class */
      /* scalar float32 to/from float64 */
         Bool from64; /* True: 64->32; False: 32->64 */
         UChar sz; /* 4, 8 or 16 only */
         AMD64CondCode cond; /* may not be Acc_ALWAYS */
         AMD64CondCode cond; /* may not be Acc_ALWAYS */
         Int sz; /* 4 or 8 only */
      /* Mov src to dst on the given condition, which may not
         be the bogus Acc_ALWAYS. */
         Int order; /* 0 <= order <= 0xFF */
         Bool toXMM; // when moving to xmm, xmm[127:64] is zeroed out
         //uu AMD64AMode* addr;
         AMD64AMode* amCounter;
         AMD64AMode* amFailAddr;
      /* No fields.  The address of the counter to inc is
         installed later, post-translation, by patching it in,
         as it is not known at translation time. */
extern AMD64Instr* AMD64Instr_Imm64      ( ULong imm64, HReg dst );
extern AMD64Instr* AMD64Instr_Alu64R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Alu64M     ( AMD64AluOp, AMD64RI*, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Unary64    ( AMD64UnaryOp op, HReg dst );
extern AMD64Instr* AMD64Instr_Lea64      ( AMD64AMode* am, HReg dst );
extern AMD64Instr* AMD64Instr_Alu32R     ( AMD64AluOp, AMD64RMI*, HReg );
extern AMD64Instr* AMD64Instr_Sh64       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Sh32       ( AMD64ShiftOp, UInt, HReg );
extern AMD64Instr* AMD64Instr_Test64     ( UInt imm32, HReg dst );
extern AMD64Instr* AMD64Instr_MulL       ( Bool syned, AMD64RM* );
extern AMD64Instr* AMD64Instr_Div        ( Bool syned, Int sz, AMD64RM* );
extern AMD64Instr* AMD64Instr_Push       ( AMD64RMI* );
extern AMD64Instr* AMD64Instr_Call       ( AMD64CondCode, Addr64, Int, RetLoc );
extern AMD64Instr* AMD64Instr_XDirect    ( Addr64 dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, Bool toFastEP );
extern AMD64Instr* AMD64Instr_XIndir     ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond );
extern AMD64Instr* AMD64Instr_XAssisted  ( HReg dstGA, AMD64AMode* amRIP,
                                           AMD64CondCode cond, IRJumpKind jk );
extern AMD64Instr* AMD64Instr_CMov64     ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_CLoad      ( AMD64CondCode cond, UChar szB,
                                           AMD64AMode* addr, HReg dst );
extern AMD64Instr* AMD64Instr_CStore     ( AMD64CondCode cond, UChar szB,
                                           HReg src, AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_MovxLQ     ( Bool syned, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_LoadEX     ( UChar szSmall, Bool syned,
                                           AMD64AMode* src, HReg dst );
extern AMD64Instr* AMD64Instr_Store      ( UChar sz, HReg src, AMD64AMode* dst );
extern AMD64Instr* AMD64Instr_Set64      ( AMD64CondCode cond, HReg dst );
extern AMD64Instr* AMD64Instr_Bsfr64     ( Bool isFwds, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_MFence     ( void );
extern AMD64Instr* AMD64Instr_ACAS       ( AMD64AMode* addr, UChar sz );
extern AMD64Instr* AMD64Instr_DACAS      ( AMD64AMode* addr, UChar sz );

extern AMD64Instr* AMD64Instr_A87Free    ( Int nregs );
extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
extern AMD64Instr* AMD64Instr_A87FpOp    ( A87FpOp op );
extern AMD64Instr* AMD64Instr_A87LdCW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_A87StSW    ( AMD64AMode* addr );
extern AMD64Instr* AMD64Instr_LdMXCSR    ( AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseUComIS  ( Int sz, HReg srcL, HReg srcR, HReg dst );
extern AMD64Instr* AMD64Instr_SseSI2SF   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSF2SI   ( Int szS, Int szD, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseSDSS    ( Bool from64, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseLdSt    ( Bool isLoad, Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCStore  ( AMD64CondCode, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_SseCLoad   ( AMD64CondCode, AMD64AMode*, HReg );
extern AMD64Instr* AMD64Instr_SseLdzLO   ( Int sz, HReg, AMD64AMode* );
extern AMD64Instr* AMD64Instr_Sse32Fx4   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse32FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64Fx2   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Sse64FLo   ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseReRg    ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_SseCMov    ( AMD64CondCode, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShuf    ( Int order, HReg src, HReg dst );
extern AMD64Instr* AMD64Instr_SseShiftN  ( AMD64SseOp,
                                           UInt shiftBits, HReg dst );
extern AMD64Instr* AMD64Instr_SseMOVQ    ( HReg gpr, HReg xmm, Bool toXMM );
//uu extern AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad, HReg, AMD64AMode* );
//uu extern AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp, HReg, HReg );
extern AMD64Instr* AMD64Instr_Avx32FLo   ( AMD64SseOp, HReg, HReg, HReg );
extern AMD64Instr* AMD64Instr_Avx64FLo   ( AMD64SseOp, HReg, HReg, HReg );
extern AMD64Instr* AMD64Instr_EvCheck    ( AMD64AMode* amCounter,
                                           AMD64AMode* amFailAddr );
extern AMD64Instr* AMD64Instr_ProfInc    ( void );
extern void ppAMD64Instr ( const AMD64Instr*, Bool );
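/* Example (illustrative only; variable names hypothetical): a typical
   instruction-selection fragment builds instructions with the
   constructors above and can dump them with ppAMD64Instr, e.g.

      AMD64Instr* i1 = AMD64Instr_Imm64 ( 0x1000ULL, hregAMD64_RAX() );
      AMD64Instr* i2 = AMD64Instr_Lea64 (
                          AMD64AMode_IRRS(0, hregAMD64_RAX(),
                                             hregAMD64_RSI(), 3),
                          hregAMD64_RDI() );
      ppAMD64Instr ( i2, True );

   The Bool argument to ppAMD64Instr is assumed to be the usual mode64
   flag taken by the other *_AMD64Instr helpers below. */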
/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
extern void mapRegs_AMD64Instr     ( HRegRemap*, AMD64Instr*, Bool );
extern Int          emit_AMD64Instr   ( /*MB_MOD*/Bool* is_profInc,
                                        UChar* buf, Int nbuf,
                                        const AMD64Instr* i,
                                        Bool mode64,
                                        VexEndness endness_host,
                                        const void* disp_cp_chain_me_to_slowEP,
                                        const void* disp_cp_chain_me_to_fastEP,
                                        const void* disp_cp_xindir,
                                        const void* disp_cp_xassisted );
extern void genSpill_AMD64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern AMD64Instr* genMove_AMD64 ( HReg from, HReg to, Bool );
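/* Sketch (illustrative only; not a description of how the allocator is
   actually wired up): the register allocator calls these when it needs
   to park a real register's value in the spill area at a given offset,
   or bring it back, e.g.

      HInstr *i1 = NULL, *i2 = NULL;
      genSpill_AMD64  ( &i1, &i2, hregAMD64_R12(), 0x40, True );
      genReload_AMD64 ( &i1, &i2, hregAMD64_R12(), 0x40, True );

   The offset 0x40 is hypothetical, the trailing Bool is assumed to be
   the usual mode64 flag, and the two OUT parameters presumably allow
   each call to produce one or two instructions. */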
extern AMD64Instr* directReload_AMD64 ( AMD64Instr* i,
                                        HReg vreg, Short spill_off );
extern const RRegUniverse* getRRegUniverse_AMD64 ( void );
extern HInstrArray* iselSB_AMD64 ( const IRSB*,
                                   VexArch,
                                   const VexArchInfo*,
                                   const VexAbiInfo*,
                                   Int offs_Host_EvC_Counter,
                                   Int offs_Host_EvC_FailAddr,
                                   Bool chainingAllowed,
                                   Bool addProfInc,
                                   Addr max_ga );
/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
   and so assumes that they are both <= 128, and so can use the short
   offset encoding.  This is all checked with assertions, so in the
   worst case we will merely assert at startup. */
extern Int evCheckSzB_AMD64 ( void );
/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
                                          void* place_to_chain,
                                          const void* disp_cp_chain_me_EXPECTED,
                                          const void* place_to_jump_to );
extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
                                            void* place_to_unchain,
                                            const void* place_to_jump_to_EXPECTED,
                                            const void* disp_cp_chain_me );
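/* Usage sketch (illustrative only): after an XDirect translation has
   been emitted, a caller can redirect it in place:

      VexInvalRange vir
         = chainXDirect_AMD64 ( endness_host, place_to_chain,
                                disp_cp_chain_me_EXPECTED,
                                place_to_jump_to );
      -- the caller is then presumably expected to invalidate the
      -- instruction-cache range described by 'vir'; unchainXDirect_AMD64
      -- reverses the edit.

   The argument names mirror the prototypes above; the invalidation
   step is an assumption about how the returned VexInvalRange is used. */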
/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
                                          void*  place_to_patch,
                                          const ULong* location_of_counter );
#endif /* ndef __VEX_HOST_AMD64_DEFS_H */
/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/