/* [extraction artifact — git web-view header, not part of this file:
     "mips: Add some missing syscalls for mips32"
     [valgrind.git] / VEX / priv / host_amd64_defs.h
     blob eae878e312d73fa6ea3f51b52167f9511f83205b] */
/*---------------------------------------------------------------*/
/*--- begin                                 host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/

/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2004-2017 OpenWorks LLP
      info@open-works.net

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/
34 #ifndef __VEX_HOST_AMD64_DEFS_H
35 #define __VEX_HOST_AMD64_DEFS_H
37 #include "libvex_basictypes.h"
38 #include "libvex.h" // VexArch
39 #include "host_generic_regs.h" // HReg
41 /* --------- Registers. --------- */
43 /* The usual HReg abstraction. There are 16 real int regs, 6 real
44 float regs, and 16 real vector regs.
47 #define ST_IN static inline
48 ST_IN HReg hregAMD64_R12 ( void ) { return mkHReg(False, HRcInt64, 12, 0); }
49 ST_IN HReg hregAMD64_R13 ( void ) { return mkHReg(False, HRcInt64, 13, 1); }
50 ST_IN HReg hregAMD64_R14 ( void ) { return mkHReg(False, HRcInt64, 14, 2); }
51 ST_IN HReg hregAMD64_R15 ( void ) { return mkHReg(False, HRcInt64, 15, 3); }
52 ST_IN HReg hregAMD64_RBX ( void ) { return mkHReg(False, HRcInt64, 3, 4); }
53 ST_IN HReg hregAMD64_RSI ( void ) { return mkHReg(False, HRcInt64, 6, 5); }
54 ST_IN HReg hregAMD64_RDI ( void ) { return mkHReg(False, HRcInt64, 7, 6); }
55 ST_IN HReg hregAMD64_R8 ( void ) { return mkHReg(False, HRcInt64, 8, 7); }
56 ST_IN HReg hregAMD64_R9 ( void ) { return mkHReg(False, HRcInt64, 9, 8); }
57 ST_IN HReg hregAMD64_R10 ( void ) { return mkHReg(False, HRcInt64, 10, 9); }
59 ST_IN HReg hregAMD64_XMM3 ( void ) { return mkHReg(False, HRcVec128, 3, 10); }
60 ST_IN HReg hregAMD64_XMM4 ( void ) { return mkHReg(False, HRcVec128, 4, 11); }
61 ST_IN HReg hregAMD64_XMM5 ( void ) { return mkHReg(False, HRcVec128, 5, 12); }
62 ST_IN HReg hregAMD64_XMM6 ( void ) { return mkHReg(False, HRcVec128, 6, 13); }
63 ST_IN HReg hregAMD64_XMM7 ( void ) { return mkHReg(False, HRcVec128, 7, 14); }
64 ST_IN HReg hregAMD64_XMM8 ( void ) { return mkHReg(False, HRcVec128, 8, 15); }
65 ST_IN HReg hregAMD64_XMM9 ( void ) { return mkHReg(False, HRcVec128, 9, 16); }
66 ST_IN HReg hregAMD64_XMM10 ( void ) { return mkHReg(False, HRcVec128, 10, 17); }
67 ST_IN HReg hregAMD64_XMM11 ( void ) { return mkHReg(False, HRcVec128, 11, 18); }
68 ST_IN HReg hregAMD64_XMM12 ( void ) { return mkHReg(False, HRcVec128, 12, 19); }
70 ST_IN HReg hregAMD64_RAX ( void ) { return mkHReg(False, HRcInt64, 0, 20); }
71 ST_IN HReg hregAMD64_RCX ( void ) { return mkHReg(False, HRcInt64, 1, 21); }
72 ST_IN HReg hregAMD64_RDX ( void ) { return mkHReg(False, HRcInt64, 2, 22); }
73 ST_IN HReg hregAMD64_RSP ( void ) { return mkHReg(False, HRcInt64, 4, 23); }
74 ST_IN HReg hregAMD64_RBP ( void ) { return mkHReg(False, HRcInt64, 5, 24); }
75 ST_IN HReg hregAMD64_R11 ( void ) { return mkHReg(False, HRcInt64, 11, 25); }
77 ST_IN HReg hregAMD64_XMM0 ( void ) { return mkHReg(False, HRcVec128, 0, 26); }
78 ST_IN HReg hregAMD64_XMM1 ( void ) { return mkHReg(False, HRcVec128, 1, 27); }
79 #undef ST_IN
81 extern UInt ppHRegAMD64 ( HReg );
84 /* --------- Condition codes, AMD encoding. --------- */
86 typedef
87 enum {
88 Acc_O = 0, /* overflow */
89 Acc_NO = 1, /* no overflow */
91 Acc_B = 2, /* below */
92 Acc_NB = 3, /* not below */
94 Acc_Z = 4, /* zero */
95 Acc_NZ = 5, /* not zero */
97 Acc_BE = 6, /* below or equal */
98 Acc_NBE = 7, /* not below or equal */
100 Acc_S = 8, /* negative */
101 Acc_NS = 9, /* not negative */
103 Acc_P = 10, /* parity even */
104 Acc_NP = 11, /* not parity even */
106 Acc_L = 12, /* jump less */
107 Acc_NL = 13, /* not less */
109 Acc_LE = 14, /* less or equal */
110 Acc_NLE = 15, /* not less or equal */
112 Acc_ALWAYS = 16 /* the usual hack */
114 AMD64CondCode;
116 extern const HChar* showAMD64CondCode ( AMD64CondCode );
119 /* --------- Memory address expressions (amodes). --------- */
121 typedef
122 enum {
123 Aam_IR, /* Immediate + Reg */
124 Aam_IRRS /* Immediate + Reg1 + (Reg2 << Shift) */
126 AMD64AModeTag;
128 typedef
129 struct {
130 AMD64AModeTag tag;
131 union {
132 struct {
133 UInt imm;
134 HReg reg;
135 } IR;
136 struct {
137 UInt imm;
138 HReg base;
139 HReg index;
140 Int shift; /* 0, 1, 2 or 3 only */
141 } IRRS;
142 } Aam;
144 AMD64AMode;
146 extern AMD64AMode* AMD64AMode_IR ( UInt, HReg );
147 extern AMD64AMode* AMD64AMode_IRRS ( UInt, HReg, HReg, Int );
149 extern AMD64AMode* dopyAMD64AMode ( AMD64AMode* );
151 extern void ppAMD64AMode ( AMD64AMode* );
154 /* --------- Operand, which can be reg, immediate or memory. --------- */
156 typedef
157 enum {
158 Armi_Imm,
159 Armi_Reg,
160 Armi_Mem
162 AMD64RMITag;
164 typedef
165 struct {
166 AMD64RMITag tag;
167 union {
168 struct {
169 UInt imm32;
170 } Imm;
171 struct {
172 HReg reg;
173 } Reg;
174 struct {
175 AMD64AMode* am;
176 } Mem;
178 Armi;
180 AMD64RMI;
182 extern AMD64RMI* AMD64RMI_Imm ( UInt );
183 extern AMD64RMI* AMD64RMI_Reg ( HReg );
184 extern AMD64RMI* AMD64RMI_Mem ( AMD64AMode* );
186 extern void ppAMD64RMI ( AMD64RMI* );
187 extern void ppAMD64RMI_lo32 ( AMD64RMI* );
190 /* --------- Operand, which can be reg or immediate only. --------- */
192 typedef
193 enum {
194 Ari_Imm,
195 Ari_Reg
197 AMD64RITag;
199 typedef
200 struct {
201 AMD64RITag tag;
202 union {
203 struct {
204 UInt imm32;
205 } Imm;
206 struct {
207 HReg reg;
208 } Reg;
210 Ari;
212 AMD64RI;
214 extern AMD64RI* AMD64RI_Imm ( UInt );
215 extern AMD64RI* AMD64RI_Reg ( HReg );
217 extern void ppAMD64RI ( AMD64RI* );
220 /* --------- Operand, which can be reg or memory only. --------- */
222 typedef
223 enum {
224 Arm_Reg,
225 Arm_Mem
227 AMD64RMTag;
229 typedef
230 struct {
231 AMD64RMTag tag;
232 union {
233 struct {
234 HReg reg;
235 } Reg;
236 struct {
237 AMD64AMode* am;
238 } Mem;
240 Arm;
242 AMD64RM;
244 extern AMD64RM* AMD64RM_Reg ( HReg );
245 extern AMD64RM* AMD64RM_Mem ( AMD64AMode* );
247 extern void ppAMD64RM ( AMD64RM* );
250 /* --------- Instructions. --------- */
252 /* --------- */
253 typedef
254 enum {
255 Aun_NEG,
256 Aun_NOT
258 AMD64UnaryOp;
260 extern const HChar* showAMD64UnaryOp ( AMD64UnaryOp );
263 /* --------- */
264 typedef
265 enum {
266 Aalu_INVALID,
267 Aalu_MOV,
268 Aalu_CMP,
269 Aalu_ADD, Aalu_SUB, Aalu_ADC, Aalu_SBB,
270 Aalu_AND, Aalu_OR, Aalu_XOR,
271 Aalu_MUL
273 AMD64AluOp;
275 extern const HChar* showAMD64AluOp ( AMD64AluOp );
278 /* --------- */
279 typedef
280 enum {
281 Ash_INVALID,
282 Ash_SHL, Ash_SHR, Ash_SAR
284 AMD64ShiftOp;
286 extern const HChar* showAMD64ShiftOp ( AMD64ShiftOp );
289 /* --------- */
290 typedef
291 enum {
292 Afp_INVALID,
293 /* Binary */
294 Afp_SCALE, Afp_ATAN, Afp_YL2X, Afp_YL2XP1, Afp_PREM, Afp_PREM1,
295 /* Unary */
296 Afp_SQRT,
297 Afp_SIN, Afp_COS, Afp_TAN,
298 Afp_ROUND, Afp_2XM1
300 A87FpOp;
302 extern const HChar* showA87FpOp ( A87FpOp );
305 /* --------- */
306 typedef
307 enum {
308 Asse_INVALID,
309 /* mov */
310 Asse_MOV,
311 /* Floating point binary */
312 Asse_ADDF, Asse_SUBF, Asse_MULF, Asse_DIVF,
313 Asse_MAXF, Asse_MINF,
314 Asse_CMPEQF, Asse_CMPLTF, Asse_CMPLEF, Asse_CMPUNF,
315 /* Floating point unary */
316 Asse_RCPF, Asse_RSQRTF, Asse_SQRTF,
317 /* Floating point conversion */
318 Asse_I2F, // i32-signed to float conversion, aka cvtdq2ps in vec form
319 Asse_F2I, // float to i32-signed conversion, aka cvtps2dq in vec form
320 /* Bitwise */
321 Asse_AND, Asse_OR, Asse_XOR, Asse_ANDN,
322 Asse_ADD8, Asse_ADD16, Asse_ADD32, Asse_ADD64,
323 Asse_QADD8U, Asse_QADD16U,
324 Asse_QADD8S, Asse_QADD16S,
325 Asse_SUB8, Asse_SUB16, Asse_SUB32, Asse_SUB64,
326 Asse_QSUB8U, Asse_QSUB16U,
327 Asse_QSUB8S, Asse_QSUB16S,
328 Asse_MUL16,
329 Asse_MULHI16U,
330 Asse_MULHI16S,
331 Asse_AVG8U, Asse_AVG16U,
332 Asse_MAX16S,
333 Asse_MAX8U,
334 Asse_MIN16S,
335 Asse_MIN8U,
336 Asse_CMPEQ8, Asse_CMPEQ16, Asse_CMPEQ32,
337 Asse_CMPGT8S, Asse_CMPGT16S, Asse_CMPGT32S,
338 Asse_SHL16, Asse_SHL32, Asse_SHL64, Asse_SHL128,
339 Asse_SHR16, Asse_SHR32, Asse_SHR64, Asse_SHR128,
340 Asse_SAR16, Asse_SAR32,
341 Asse_PACKSSD, Asse_PACKSSW, Asse_PACKUSW,
342 Asse_UNPCKHB, Asse_UNPCKHW, Asse_UNPCKHD, Asse_UNPCKHQ,
343 Asse_UNPCKLB, Asse_UNPCKLW, Asse_UNPCKLD, Asse_UNPCKLQ,
344 // Only for SSSE3 capable hosts:
345 Asse_PSHUFB,
346 Asse_PMADDUBSW,
347 // Only for F16C capable hosts:
348 Asse_F32toF16, // F32 to F16 conversion, aka vcvtps2ph
349 Asse_F16toF32, // F16 to F32 conversion, aka vcvtph2ps
350 // Only for FMA (FMA3) capable hosts:
351 Asse_VFMADD213, // Fused Multiply-Add, aka vfmadd213ss
353 AMD64SseOp;
355 extern const HChar* showAMD64SseOp ( AMD64SseOp );
/* --------- */
typedef
   enum {
      Ain_Imm64,       /* Generate 64-bit literal to register */
      Ain_Alu64R,      /* 64-bit mov/arith/logical, dst=REG */
      Ain_Alu64M,      /* 64-bit mov/arith/logical, dst=MEM */
      Ain_Sh64,        /* 64-bit shift, dst=REG */
      Ain_Sh32,        /* 32-bit shift, dst=REG */
      Ain_Test64,      /* 64-bit test (AND, set flags, discard result) */
      Ain_Unary64,     /* 64-bit not and neg */
      Ain_Lea64,       /* 64-bit compute EA into a reg */
      Ain_Alu32R,      /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
      Ain_MulL,        /* widening multiply */
      Ain_Div,         /* div and mod */
      Ain_Push,        /* push 64-bit value on stack */
      Ain_Call,        /* call to address in register */
      Ain_XDirect,     /* direct transfer to GA */
      Ain_XIndir,      /* indirect transfer to GA */
      Ain_XAssisted,   /* assisted transfer to GA */
      Ain_CMov64,      /* conditional move, 64-bit reg-reg only */
      Ain_CLoad,       /* cond. load to int reg, 32 bit ZX or 64 bit only */
      Ain_CStore,      /* cond. store from int reg, 32 or 64 bit only */
      Ain_MovxLQ,      /* reg-reg move, zx-ing/sx-ing top half */
      Ain_LoadEX,      /* mov{s,z}{b,w,l}q from mem to reg */
      Ain_Store,       /* store 32/16/8 bit value in memory */
      Ain_Set64,       /* convert condition code to 64-bit value */
      Ain_Bsfr64,      /* 64-bit bsf/bsr */
      Ain_MFence,      /* mem fence */
      Ain_ACAS,        /* 8/16/32/64-bit lock;cmpxchg */
      Ain_DACAS,       /* lock;cmpxchg8b/16b (doubleword ACAS, 2 x
                          32-bit or 2 x 64-bit only) */
      Ain_A87Free,     /* free up x87 registers */
      Ain_A87PushPop,  /* x87 loads/stores */
      Ain_A87FpOp,     /* x87 operations */
      Ain_A87LdCW,     /* load x87 control word */
      Ain_A87StSW,     /* store x87 status word */
      Ain_LdMXCSR,     /* load %mxcsr */
      Ain_SseUComIS,   /* ucomisd/ucomiss, then get %rflags into int
                          register */
      Ain_SseSI2SF,    /* scalar 32/64 int to 32/64 float conversion */
      Ain_SseSF2SI,    /* scalar 32/64 float to 32/64 int conversion */
      Ain_SseSDSS,     /* scalar float32 to/from float64 */
      Ain_SseLdSt,     /* SSE load/store 32/64/128 bits, no alignment
                          constraints, upper 96/64/0 bits arbitrary */
      Ain_SseCStore,   /* SSE conditional store, 128 bit only, any alignment */
      Ain_SseCLoad,    /* SSE conditional load, 128 bit only, any alignment */
      Ain_SseLdzLO,    /* SSE load low 32/64 bits, zero remainder of reg */
      Ain_Sse32Fx4,    /* SSE binary, 32Fx4 */
      Ain_Sse32FLo,    /* SSE binary, 32F in lowest lane only */
      Ain_Sse64Fx2,    /* SSE binary, 64Fx2 */
      Ain_Sse64FLo,    /* SSE binary, 64F in lowest lane only */
      Ain_SseReRg,     /* SSE binary general reg-reg, Re, Rg */
      Ain_SseCMov,     /* SSE conditional move */
      Ain_SseShuf,     /* SSE2 shuffle (pshufd) */
      Ain_SseShiftN,   /* SSE2 shift by immediate */
      Ain_SseMOVQ,     /* SSE2 moves of xmm[63:0] to/from GPR */
      //uu Ain_AvxLdSt, /* AVX load/store 256 bits,
      //uu                 no alignment constraints */
      //uu Ain_AvxReRg, /* AVX binary general reg-reg, Re, Rg */
      Ain_Avx32FLo,    /* AVX binary 3 operand, 32F in lowest lane only */
      Ain_Avx64FLo,    /* AVX binary 3 operand, 64F in lowest lane only */
      Ain_EvCheck,     /* Event check */
      Ain_ProfInc      /* 64-bit profile counter increment */
   }
   AMD64InstrTag;
424 /* Destinations are on the RIGHT (second operand) */
426 typedef
427 struct {
428 AMD64InstrTag tag;
429 union {
430 struct {
431 ULong imm64;
432 HReg dst;
433 } Imm64;
434 struct {
435 AMD64AluOp op;
436 AMD64RMI* src;
437 HReg dst;
438 } Alu64R;
439 struct {
440 AMD64AluOp op;
441 AMD64RI* src;
442 AMD64AMode* dst;
443 } Alu64M;
444 struct {
445 AMD64ShiftOp op;
446 UInt src; /* shift amount, or 0 means %cl */
447 HReg dst;
448 } Sh64;
449 struct {
450 AMD64ShiftOp op;
451 UInt src; /* shift amount, or 0 means %cl */
452 HReg dst;
453 } Sh32;
454 struct {
455 UInt imm32;
456 HReg dst;
457 } Test64;
458 /* Not and Neg */
459 struct {
460 AMD64UnaryOp op;
461 HReg dst;
462 } Unary64;
463 /* 64-bit compute EA into a reg */
464 struct {
465 AMD64AMode* am;
466 HReg dst;
467 } Lea64;
468 /* 32-bit add/sub/and/or/xor/cmp, dst=REG (a la Alu64R) */
469 struct {
470 AMD64AluOp op;
471 AMD64RMI* src;
472 HReg dst;
473 } Alu32R;
474 /* 64 x 64 -> 128 bit widening multiply: RDX:RAX = RAX *s/u
475 r/m64 */
476 struct {
477 Bool syned;
478 AMD64RM* src;
479 } MulL;
480 /* amd64 div/idiv instruction. Modifies RDX and RAX and
481 reads src. */
482 struct {
483 Bool syned;
484 Int sz; /* 4 or 8 only */
485 AMD64RM* src;
486 } Div;
487 struct {
488 AMD64RMI* src;
489 } Push;
490 /* Pseudo-insn. Call target (an absolute address), on given
491 condition (which could be Xcc_ALWAYS). */
492 struct {
493 AMD64CondCode cond;
494 Addr64 target;
495 Int regparms; /* 0 .. 6 */
496 RetLoc rloc; /* where the return value will be */
497 } Call;
498 /* Update the guest RIP value, then exit requesting to chain
499 to it. May be conditional. */
500 struct {
501 Addr64 dstGA; /* next guest address */
502 AMD64AMode* amRIP; /* amode in guest state for RIP */
503 AMD64CondCode cond; /* can be Acc_ALWAYS */
504 Bool toFastEP; /* chain to the slow or fast point? */
505 } XDirect;
506 /* Boring transfer to a guest address not known at JIT time.
507 Not chainable. May be conditional. */
508 struct {
509 HReg dstGA;
510 AMD64AMode* amRIP;
511 AMD64CondCode cond; /* can be Acc_ALWAYS */
512 } XIndir;
513 /* Assisted transfer to a guest address, most general case.
514 Not chainable. May be conditional. */
515 struct {
516 HReg dstGA;
517 AMD64AMode* amRIP;
518 AMD64CondCode cond; /* can be Acc_ALWAYS */
519 IRJumpKind jk;
520 } XAssisted;
521 /* Mov src to dst on the given condition, which may not
522 be the bogus Acc_ALWAYS. */
523 struct {
524 AMD64CondCode cond;
525 HReg src;
526 HReg dst;
527 } CMov64;
528 /* conditional load to int reg, 32 bit ZX or 64 bit only.
529 cond may not be Acc_ALWAYS. */
530 struct {
531 AMD64CondCode cond;
532 UChar szB; /* 4 or 8 only */
533 AMD64AMode* addr;
534 HReg dst;
535 } CLoad;
536 /* cond. store from int reg, 32 or 64 bit only.
537 cond may not be Acc_ALWAYS. */
538 struct {
539 AMD64CondCode cond;
540 UChar szB; /* 4 or 8 only */
541 HReg src;
542 AMD64AMode* addr;
543 } CStore;
544 /* reg-reg move, sx-ing/zx-ing top half */
545 struct {
546 Bool syned;
547 HReg src;
548 HReg dst;
549 } MovxLQ;
550 /* Sign/Zero extending loads. Dst size is always 64 bits. */
551 struct {
552 UChar szSmall; /* only 1, 2 or 4 */
553 Bool syned;
554 AMD64AMode* src;
555 HReg dst;
556 } LoadEX;
557 /* 32/16/8 bit stores. */
558 struct {
559 UChar sz; /* only 1, 2 or 4 */
560 HReg src;
561 AMD64AMode* dst;
562 } Store;
563 /* Convert an amd64 condition code to a 64-bit value (0 or 1). */
564 struct {
565 AMD64CondCode cond;
566 HReg dst;
567 } Set64;
568 /* 64-bit bsf or bsr. */
569 struct {
570 Bool isFwds;
571 HReg src;
572 HReg dst;
573 } Bsfr64;
574 /* Mem fence. In short, an insn which flushes all preceding
575 loads and stores as much as possible before continuing.
576 On AMD64 we emit a real "mfence". */
577 struct {
578 } MFence;
579 struct {
580 AMD64AMode* addr;
581 UChar sz; /* 1, 2, 4 or 8 */
582 } ACAS;
583 struct {
584 AMD64AMode* addr;
585 UChar sz; /* 4 or 8 only */
586 } DACAS;
588 /* --- X87 --- */
590 /* A very minimal set of x87 insns, that operate exactly in a
591 stack-like way so no need to think about x87 registers. */
593 /* Do 'ffree' on %st(7) .. %st(7-nregs) */
594 struct {
595 Int nregs; /* 1 <= nregs <= 7 */
596 } A87Free;
598 /* Push a 32- or 64-bit FP value from memory onto the stack,
599 or move a value from the stack to memory and remove it
600 from the stack. */
601 struct {
602 AMD64AMode* addr;
603 Bool isPush;
604 UChar szB; /* 4 or 8 */
605 } A87PushPop;
607 /* Do an operation on the top-of-stack. This can be unary, in
608 which case it is %st0 = OP( %st0 ), or binary: %st0 = OP(
609 %st0, %st1 ). */
610 struct {
611 A87FpOp op;
612 } A87FpOp;
614 /* Load the FPU control word. */
615 struct {
616 AMD64AMode* addr;
617 } A87LdCW;
619 /* Store the FPU status word (fstsw m16) */
620 struct {
621 AMD64AMode* addr;
622 } A87StSW;
624 /* --- SSE --- */
626 /* Load 32 bits into %mxcsr. */
627 struct {
628 AMD64AMode* addr;
630 LdMXCSR;
631 /* ucomisd/ucomiss, then get %rflags into int register */
632 struct {
633 UChar sz; /* 4 or 8 only */
634 HReg srcL; /* xmm */
635 HReg srcR; /* xmm */
636 HReg dst; /* int */
637 } SseUComIS;
638 /* scalar 32/64 int to 32/64 float conversion */
639 struct {
640 UChar szS; /* 4 or 8 */
641 UChar szD; /* 4 or 8 */
642 HReg src; /* i class */
643 HReg dst; /* v class */
644 } SseSI2SF;
645 /* scalar 32/64 float to 32/64 int conversion */
646 struct {
647 UChar szS; /* 4 or 8 */
648 UChar szD; /* 4 or 8 */
649 HReg src; /* v class */
650 HReg dst; /* i class */
651 } SseSF2SI;
652 /* scalar float32 to/from float64 */
653 struct {
654 Bool from64; /* True: 64->32; False: 32->64 */
655 HReg src;
656 HReg dst;
657 } SseSDSS;
658 struct {
659 Bool isLoad;
660 UChar sz; /* 4, 8 or 16 only */
661 HReg reg;
662 AMD64AMode* addr;
663 } SseLdSt;
664 struct {
665 AMD64CondCode cond; /* may not be Acc_ALWAYS */
666 HReg src;
667 AMD64AMode* addr;
668 } SseCStore;
669 struct {
670 AMD64CondCode cond; /* may not be Acc_ALWAYS */
671 AMD64AMode* addr;
672 HReg dst;
673 } SseCLoad;
674 struct {
675 Int sz; /* 4 or 8 only */
676 HReg reg;
677 AMD64AMode* addr;
678 } SseLdzLO;
679 struct {
680 AMD64SseOp op;
681 HReg src;
682 HReg dst;
683 } Sse32Fx4;
684 struct {
685 AMD64SseOp op;
686 HReg src;
687 HReg dst;
688 } Sse32FLo;
689 struct {
690 AMD64SseOp op;
691 HReg src;
692 HReg dst;
693 } Sse64Fx2;
694 struct {
695 AMD64SseOp op;
696 HReg src;
697 HReg dst;
698 } Sse64FLo;
699 struct {
700 AMD64SseOp op;
701 HReg src;
702 HReg dst;
703 } SseReRg;
704 /* Mov src to dst on the given condition, which may not
705 be the bogus Xcc_ALWAYS. */
706 struct {
707 AMD64CondCode cond;
708 HReg src;
709 HReg dst;
710 } SseCMov;
711 struct {
712 Int order; /* 0 <= order <= 0xFF */
713 HReg src;
714 HReg dst;
715 } SseShuf;
716 struct {
717 AMD64SseOp op;
718 UInt shiftBits;
719 HReg dst;
720 } SseShiftN;
721 struct {
722 HReg gpr;
723 HReg xmm;
724 Bool toXMM; // when moving to xmm, xmm[127:64] is zeroed out
725 } SseMOVQ;
726 //uu struct {
727 //uu Bool isLoad;
728 //uu HReg reg;
729 //uu AMD64AMode* addr;
730 //uu } AvxLdSt;
731 //uu struct {
732 //uu AMD64SseOp op;
733 //uu HReg src;
734 //uu HReg dst;
735 //uu } AvxReRg;
736 struct {
737 AMD64SseOp op;
738 HReg src1;
739 HReg src2;
740 HReg dst;
741 } Avx32FLo;
742 struct {
743 AMD64SseOp op;
744 HReg src1;
745 HReg src2;
746 HReg dst;
747 } Avx64FLo;
748 struct {
749 AMD64AMode* amCounter;
750 AMD64AMode* amFailAddr;
751 } EvCheck;
752 struct {
753 /* No fields. The address of the counter to inc is
754 installed later, post-translation, by patching it in,
755 as it is not known at translation time. */
756 } ProfInc;
758 } Ain;
760 AMD64Instr;
762 extern AMD64Instr* AMD64Instr_Imm64 ( ULong imm64, HReg dst );
763 extern AMD64Instr* AMD64Instr_Alu64R ( AMD64AluOp, AMD64RMI*, HReg );
764 extern AMD64Instr* AMD64Instr_Alu64M ( AMD64AluOp, AMD64RI*, AMD64AMode* );
765 extern AMD64Instr* AMD64Instr_Unary64 ( AMD64UnaryOp op, HReg dst );
766 extern AMD64Instr* AMD64Instr_Lea64 ( AMD64AMode* am, HReg dst );
767 extern AMD64Instr* AMD64Instr_Alu32R ( AMD64AluOp, AMD64RMI*, HReg );
768 extern AMD64Instr* AMD64Instr_Sh64 ( AMD64ShiftOp, UInt, HReg );
769 extern AMD64Instr* AMD64Instr_Sh32 ( AMD64ShiftOp, UInt, HReg );
770 extern AMD64Instr* AMD64Instr_Test64 ( UInt imm32, HReg dst );
771 extern AMD64Instr* AMD64Instr_MulL ( Bool syned, AMD64RM* );
772 extern AMD64Instr* AMD64Instr_Div ( Bool syned, Int sz, AMD64RM* );
773 extern AMD64Instr* AMD64Instr_Push ( AMD64RMI* );
774 extern AMD64Instr* AMD64Instr_Call ( AMD64CondCode, Addr64, Int, RetLoc );
775 extern AMD64Instr* AMD64Instr_XDirect ( Addr64 dstGA, AMD64AMode* amRIP,
776 AMD64CondCode cond, Bool toFastEP );
777 extern AMD64Instr* AMD64Instr_XIndir ( HReg dstGA, AMD64AMode* amRIP,
778 AMD64CondCode cond );
779 extern AMD64Instr* AMD64Instr_XAssisted ( HReg dstGA, AMD64AMode* amRIP,
780 AMD64CondCode cond, IRJumpKind jk );
781 extern AMD64Instr* AMD64Instr_CMov64 ( AMD64CondCode, HReg src, HReg dst );
782 extern AMD64Instr* AMD64Instr_CLoad ( AMD64CondCode cond, UChar szB,
783 AMD64AMode* addr, HReg dst );
784 extern AMD64Instr* AMD64Instr_CStore ( AMD64CondCode cond, UChar szB,
785 HReg src, AMD64AMode* addr );
786 extern AMD64Instr* AMD64Instr_MovxLQ ( Bool syned, HReg src, HReg dst );
787 extern AMD64Instr* AMD64Instr_LoadEX ( UChar szSmall, Bool syned,
788 AMD64AMode* src, HReg dst );
789 extern AMD64Instr* AMD64Instr_Store ( UChar sz, HReg src, AMD64AMode* dst );
790 extern AMD64Instr* AMD64Instr_Set64 ( AMD64CondCode cond, HReg dst );
791 extern AMD64Instr* AMD64Instr_Bsfr64 ( Bool isFwds, HReg src, HReg dst );
792 extern AMD64Instr* AMD64Instr_MFence ( void );
793 extern AMD64Instr* AMD64Instr_ACAS ( AMD64AMode* addr, UChar sz );
794 extern AMD64Instr* AMD64Instr_DACAS ( AMD64AMode* addr, UChar sz );
796 extern AMD64Instr* AMD64Instr_A87Free ( Int nregs );
797 extern AMD64Instr* AMD64Instr_A87PushPop ( AMD64AMode* addr, Bool isPush, UChar szB );
798 extern AMD64Instr* AMD64Instr_A87FpOp ( A87FpOp op );
799 extern AMD64Instr* AMD64Instr_A87LdCW ( AMD64AMode* addr );
800 extern AMD64Instr* AMD64Instr_A87StSW ( AMD64AMode* addr );
801 extern AMD64Instr* AMD64Instr_LdMXCSR ( AMD64AMode* );
802 extern AMD64Instr* AMD64Instr_SseUComIS ( Int sz, HReg srcL, HReg srcR, HReg dst );
803 extern AMD64Instr* AMD64Instr_SseSI2SF ( Int szS, Int szD, HReg src, HReg dst );
804 extern AMD64Instr* AMD64Instr_SseSF2SI ( Int szS, Int szD, HReg src, HReg dst );
805 extern AMD64Instr* AMD64Instr_SseSDSS ( Bool from64, HReg src, HReg dst );
806 extern AMD64Instr* AMD64Instr_SseLdSt ( Bool isLoad, Int sz, HReg, AMD64AMode* );
807 extern AMD64Instr* AMD64Instr_SseCStore ( AMD64CondCode, HReg, AMD64AMode* );
808 extern AMD64Instr* AMD64Instr_SseCLoad ( AMD64CondCode, AMD64AMode*, HReg );
809 extern AMD64Instr* AMD64Instr_SseLdzLO ( Int sz, HReg, AMD64AMode* );
810 extern AMD64Instr* AMD64Instr_Sse32Fx4 ( AMD64SseOp, HReg, HReg );
811 extern AMD64Instr* AMD64Instr_Sse32FLo ( AMD64SseOp, HReg, HReg );
812 extern AMD64Instr* AMD64Instr_Sse64Fx2 ( AMD64SseOp, HReg, HReg );
813 extern AMD64Instr* AMD64Instr_Sse64FLo ( AMD64SseOp, HReg, HReg );
814 extern AMD64Instr* AMD64Instr_SseReRg ( AMD64SseOp, HReg, HReg );
815 extern AMD64Instr* AMD64Instr_SseCMov ( AMD64CondCode, HReg src, HReg dst );
816 extern AMD64Instr* AMD64Instr_SseShuf ( Int order, HReg src, HReg dst );
817 extern AMD64Instr* AMD64Instr_SseShiftN ( AMD64SseOp,
818 UInt shiftBits, HReg dst );
819 extern AMD64Instr* AMD64Instr_SseMOVQ ( HReg gpr, HReg xmm, Bool toXMM );
820 //uu extern AMD64Instr* AMD64Instr_AvxLdSt ( Bool isLoad, HReg, AMD64AMode* );
821 //uu extern AMD64Instr* AMD64Instr_AvxReRg ( AMD64SseOp, HReg, HReg );
822 extern AMD64Instr* AMD64Instr_Avx32FLo ( AMD64SseOp, HReg, HReg, HReg );
823 extern AMD64Instr* AMD64Instr_Avx64FLo ( AMD64SseOp, HReg, HReg, HReg );
824 extern AMD64Instr* AMD64Instr_EvCheck ( AMD64AMode* amCounter,
825 AMD64AMode* amFailAddr );
826 extern AMD64Instr* AMD64Instr_ProfInc ( void );
829 extern void ppAMD64Instr ( const AMD64Instr*, Bool );
831 /* Some functions that insulate the register allocator from details
832 of the underlying instruction set. */
833 extern void getRegUsage_AMD64Instr ( HRegUsage*, const AMD64Instr*, Bool );
834 extern void mapRegs_AMD64Instr ( HRegRemap*, AMD64Instr*, Bool );
835 extern Int emit_AMD64Instr ( /*MB_MOD*/Bool* is_profInc,
836 UChar* buf, Int nbuf,
837 const AMD64Instr* i,
838 Bool mode64,
839 VexEndness endness_host,
840 const void* disp_cp_chain_me_to_slowEP,
841 const void* disp_cp_chain_me_to_fastEP,
842 const void* disp_cp_xindir,
843 const void* disp_cp_xassisted );
845 extern void genSpill_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
846 HReg rreg, Int offset, Bool );
847 extern void genReload_AMD64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
848 HReg rreg, Int offset, Bool );
849 extern AMD64Instr* genMove_AMD64(HReg from, HReg to, Bool);
850 extern AMD64Instr* directReload_AMD64 ( AMD64Instr* i,
851 HReg vreg, Short spill_off );
853 extern const RRegUniverse* getRRegUniverse_AMD64 ( void );
855 extern HInstrArray* iselSB_AMD64 ( const IRSB*,
856 VexArch,
857 const VexArchInfo*,
858 const VexAbiInfo*,
859 Int offs_Host_EvC_Counter,
860 Int offs_Host_EvC_FailAddr,
861 Bool chainingAllowed,
862 Bool addProfInc,
863 Addr max_ga );
865 /* How big is an event check? This is kind of a kludge because it
866 depends on the offsets of host_EvC_FAILADDR and host_EvC_COUNTER,
867 and so assumes that they are both <= 128, and so can use the short
868 offset encoding. This is all checked with assertions, so in the
869 worst case we will merely assert at startup. */
870 extern Int evCheckSzB_AMD64 (void);
872 /* Perform a chaining and unchaining of an XDirect jump. */
873 extern VexInvalRange chainXDirect_AMD64 ( VexEndness endness_host,
874 void* place_to_chain,
875 const void* disp_cp_chain_me_EXPECTED,
876 const void* place_to_jump_to );
878 extern VexInvalRange unchainXDirect_AMD64 ( VexEndness endness_host,
879 void* place_to_unchain,
880 const void* place_to_jump_to_EXPECTED,
881 const void* disp_cp_chain_me );
883 /* Patch the counter location into an existing ProfInc point. */
884 extern VexInvalRange patchProfInc_AMD64 ( VexEndness endness_host,
885 void* place_to_patch,
886 const ULong* location_of_counter );
#endif /* ndef __VEX_HOST_AMD64_DEFS_H */

/*---------------------------------------------------------------*/
/*--- end                                   host_amd64_defs.h ---*/
/*---------------------------------------------------------------*/