/*---------------------------------------------------------------*/
/*--- begin                                 host_arm64_defs.h ---*/
/*---------------------------------------------------------------*/
/*
   This file is part of Valgrind, a dynamic binary instrumentation
   framework.

   Copyright (C) 2013-2017 OpenWorks

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
   02110-1301, USA.

   The GNU General Public License is contained in the file COPYING.
*/
#ifndef __VEX_HOST_ARM64_DEFS_H
#define __VEX_HOST_ARM64_DEFS_H

#include "libvex_basictypes.h"
#include "libvex.h"              // VexArch
#include "host_generic_regs.h"   // HReg


/* --------- Registers. --------- */

#define ST_IN static inline
ST_IN HReg hregARM64_X22 ( void ) { return mkHReg(False, HRcInt64,  22,  0); }
ST_IN HReg hregARM64_X23 ( void ) { return mkHReg(False, HRcInt64,  23,  1); }
ST_IN HReg hregARM64_X24 ( void ) { return mkHReg(False, HRcInt64,  24,  2); }
ST_IN HReg hregARM64_X25 ( void ) { return mkHReg(False, HRcInt64,  25,  3); }
ST_IN HReg hregARM64_X26 ( void ) { return mkHReg(False, HRcInt64,  26,  4); }
ST_IN HReg hregARM64_X27 ( void ) { return mkHReg(False, HRcInt64,  27,  5); }
ST_IN HReg hregARM64_X28 ( void ) { return mkHReg(False, HRcInt64,  28,  6); }

ST_IN HReg hregARM64_X0  ( void ) { return mkHReg(False, HRcInt64,   0,  7); }
ST_IN HReg hregARM64_X1  ( void ) { return mkHReg(False, HRcInt64,   1,  8); }
ST_IN HReg hregARM64_X2  ( void ) { return mkHReg(False, HRcInt64,   2,  9); }
ST_IN HReg hregARM64_X3  ( void ) { return mkHReg(False, HRcInt64,   3, 10); }
ST_IN HReg hregARM64_X4  ( void ) { return mkHReg(False, HRcInt64,   4, 11); }
ST_IN HReg hregARM64_X5  ( void ) { return mkHReg(False, HRcInt64,   5, 12); }
ST_IN HReg hregARM64_X6  ( void ) { return mkHReg(False, HRcInt64,   6, 13); }
ST_IN HReg hregARM64_X7  ( void ) { return mkHReg(False, HRcInt64,   7, 14); }

ST_IN HReg hregARM64_Q16 ( void ) { return mkHReg(False, HRcVec128, 16, 15); }
ST_IN HReg hregARM64_Q17 ( void ) { return mkHReg(False, HRcVec128, 17, 16); }
ST_IN HReg hregARM64_Q18 ( void ) { return mkHReg(False, HRcVec128, 18, 17); }
ST_IN HReg hregARM64_Q19 ( void ) { return mkHReg(False, HRcVec128, 19, 18); }
ST_IN HReg hregARM64_Q20 ( void ) { return mkHReg(False, HRcVec128, 20, 19); }

ST_IN HReg hregARM64_D8  ( void ) { return mkHReg(False, HRcFlt64,   8, 20); }
ST_IN HReg hregARM64_D9  ( void ) { return mkHReg(False, HRcFlt64,   9, 21); }
ST_IN HReg hregARM64_D10 ( void ) { return mkHReg(False, HRcFlt64,  10, 22); }
ST_IN HReg hregARM64_D11 ( void ) { return mkHReg(False, HRcFlt64,  11, 23); }
ST_IN HReg hregARM64_D12 ( void ) { return mkHReg(False, HRcFlt64,  12, 24); }
ST_IN HReg hregARM64_D13 ( void ) { return mkHReg(False, HRcFlt64,  13, 25); }

ST_IN HReg hregARM64_X8  ( void ) { return mkHReg(False, HRcInt64,   8, 26); }
ST_IN HReg hregARM64_X9  ( void ) { return mkHReg(False, HRcInt64,   9, 27); }
ST_IN HReg hregARM64_X21 ( void ) { return mkHReg(False, HRcInt64,  21, 28); }
extern UInt ppHRegARM64 ( HReg );

/* Number of registers used for arg passing in function calls */
#define ARM64_N_ARGREGS 8   /* x0 .. x7 */
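
/* Illustrative note, not part of the original interface: the mkHReg
   calls above pass False to mark these as real (non-virtual) registers;
   the remaining arguments are believed to be (register class, hardware
   encoding, index in the register universe).  A hedged usage sketch:

      HReg r0 = hregARM64_X0();   // real x0, class HRcInt64, encoding 0
      ppHRegARM64(r0);            // prints the register's name

   The trailing index (0 .. 28 here) gives each register's position in
   the fixed ordering returned by getRRegUniverse_ARM64 below. */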
/* --------- Condition codes. --------- */

      ARM64cc_EQ  = 0,  /* equal                         : Z=1 */
      ARM64cc_NE  = 1,  /* not equal                     : Z=0 */

      ARM64cc_CS  = 2,  /* >=u (higher or same)          : C=1 */
      ARM64cc_CC  = 3,  /* <u  (lower)                   : C=0 */

      ARM64cc_MI  = 4,  /* minus (negative)              : N=1 */
      ARM64cc_PL  = 5,  /* plus (zero or +ve)            : N=0 */

      ARM64cc_VS  = 6,  /* overflow                      : V=1 */
      ARM64cc_VC  = 7,  /* no overflow                   : V=0 */

      ARM64cc_HI  = 8,  /* >u   (higher)                 : C=1 && Z=0 */
      ARM64cc_LS  = 9,  /* <=u  (lower or same)          : !(C=1 && Z=0) */

      ARM64cc_GE  = 10, /* >=s (signed greater or equal) : N=V */
      ARM64cc_LT  = 11, /* <s  (signed less than)        : !(N=V) */

      ARM64cc_GT  = 12, /* >s  (signed greater)          : Z=0 && N=V */
      ARM64cc_LE  = 13, /* <=s (signed less or equal)    : !(Z=0 && N=V) */

      ARM64cc_AL  = 14, /* always (unconditional) */
      ARM64cc_NV  = 15  /* in 64-bit mode also means "always" */
/* --------- Memory address expressions (amodes). --------- */

      ARM64am_RI9=10, /* reg + simm9 */
      ARM64am_RI12,   /* reg + uimm12 * szB (iow, scaled by access size) */
      ARM64am_RR      /* reg1 + reg2 */

            Int simm9;    /* -256 .. +255 */

            UInt  uimm12; /* 0 .. 4095 */
            UChar szB;    /* 1, 2, 4, 8 (16 ?) */

extern ARM64AMode* ARM64AMode_RI9  ( HReg reg, Int simm9 );
extern ARM64AMode* ARM64AMode_RI12 ( HReg reg, Int uimm12, UChar szB );
extern ARM64AMode* ARM64AMode_RR   ( HReg base, HReg index );
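
/* Illustrative only: a hedged sketch of how instruction selection might
   build each amode kind (the register choices are arbitrary examples).
   Note that for RI12 the immediate is the scaled offset, i.e. the byte
   offset divided by the access size, per the comment above:

      ARM64AMode* am1 = ARM64AMode_RI9 ( hregARM64_X21(), -8 );
         // [x21, #-8], signed 9-bit unscaled offset

      ARM64AMode* am2 = ARM64AMode_RI12( hregARM64_X21(), 2, 8 );
         // [x21, #16] for an 8-byte access: uimm12 = 16/8 = 2, szB = 8

      ARM64AMode* am3 = ARM64AMode_RR  ( hregARM64_X21(), hregARM64_X9() );
         // [x21, x9]
*/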
/* --------- Reg or uimm12 or (uimm12 << 12) operands --------- */

      ARM64riA_I12=20, /* uimm12 << 0 or 12 only */

            UShort imm12; /* 0 .. 4095 */
            UChar  shift; /* 0 or 12 only */

extern ARM64RIA* ARM64RIA_I12 ( UShort imm12, UChar shift );
extern ARM64RIA* ARM64RIA_R   ( HReg );
/* --------- Reg or "bitfield" (logic immediate) operands --------- */

      ARM64riL_I13=6, /* weird-o bitfield immediate, 13 bits in total */

            UChar bitN; /* 0 .. 1 */
            UChar immR; /* 0 .. 63 */
            UChar immS; /* 0 .. 63 */

extern ARM64RIL* ARM64RIL_I13 ( UChar bitN, UChar immR, UChar immS );
extern ARM64RIL* ARM64RIL_R   ( HReg );
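
/* Illustrative only: (bitN, immR, immS) is the standard AArch64
   "logical immediate" (N:immr:imms) encoding.  As a hedged example,
   the 64-bit constant 0xFF (a run of eight ones, no rotation) should
   encode as N=1, immR=0, immS=7, so a mask operand for a 64-bit AND
   could be requested as:

      ARM64RIL* mask = ARM64RIL_I13( 1, 0, 7 );   // #0xFF
*/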
/* --------------- Reg or uimm6 operands --------------- */

      ARM64ri6_I6=30, /* uimm6, 1 .. 63 only */

            UInt imm6; /* 1 .. 63 */

extern ARM64RI6* ARM64RI6_I6 ( UInt imm6 );
extern ARM64RI6* ARM64RI6_R  ( HReg );
/* --------------------- Instructions --------------------- */

      ARM64mul_PLAIN=70, /* lo64(64 * 64)  */
      ARM64mul_ZX,       /* hi64(64 *u 64) */
      ARM64mul_SX        /* hi64(64 *s 64) */

   /* These characterise an integer-FP conversion, but don't imply any
      particular direction. */
      ARM64cvt_F32_I32S=80,
      ARM64vecb_ADD64x2=120, ARM64vecb_ADD32x4,
      ARM64vecb_ADD16x8,     ARM64vecb_ADD8x16,
      ARM64vecb_SUB64x2,     ARM64vecb_SUB32x4,
      ARM64vecb_SUB16x8,     ARM64vecb_SUB8x16,
      ARM64vecb_MUL16x8,     ARM64vecb_MUL8x16,
      ARM64vecb_FADD64x2,    ARM64vecb_FADD32x4,
      ARM64vecb_FSUB64x2,    ARM64vecb_FSUB32x4,
      ARM64vecb_FMUL64x2,    ARM64vecb_FMUL32x4,
      ARM64vecb_FDIV64x2,    ARM64vecb_FDIV32x4,
      ARM64vecb_FMAX64x2,    ARM64vecb_FMAX32x4,
      ARM64vecb_FMIN64x2,    ARM64vecb_FMIN32x4,
      ARM64vecb_UMAX16x8,    ARM64vecb_UMAX8x16,
      ARM64vecb_UMIN16x8,    ARM64vecb_UMIN8x16,
      ARM64vecb_SMAX16x8,    ARM64vecb_SMAX8x16,
      ARM64vecb_SMIN16x8,    ARM64vecb_SMIN8x16,
      ARM64vecb_CMEQ64x2,    ARM64vecb_CMEQ32x4,
      ARM64vecb_CMEQ16x8,    ARM64vecb_CMEQ8x16,
      ARM64vecb_CMHI64x2,    ARM64vecb_CMHI32x4, /* >u */
      ARM64vecb_CMHI16x8,    ARM64vecb_CMHI8x16,
      ARM64vecb_CMGT64x2,    ARM64vecb_CMGT32x4, /* >s */
      ARM64vecb_CMGT16x8,    ARM64vecb_CMGT8x16,
      ARM64vecb_FCMEQ64x2,   ARM64vecb_FCMEQ32x4,
      ARM64vecb_FCMGE64x2,   ARM64vecb_FCMGE32x4,
      ARM64vecb_FCMGT64x2,   ARM64vecb_FCMGT32x4,
      ARM64vecb_UZP164x2,    ARM64vecb_UZP132x4,
      ARM64vecb_UZP116x8,    ARM64vecb_UZP18x16,
      ARM64vecb_UZP264x2,    ARM64vecb_UZP232x4,
      ARM64vecb_UZP216x8,    ARM64vecb_UZP28x16,
      ARM64vecb_ZIP132x4,    ARM64vecb_ZIP116x8,
      ARM64vecb_ZIP18x16,    ARM64vecb_ZIP232x4,
      ARM64vecb_ZIP216x8,    ARM64vecb_ZIP28x16,
      ARM64vecb_UMULL4SHH,   ARM64vecb_UMULL8HBB,
      ARM64vecb_SMULL4SHH,   ARM64vecb_SMULL8HBB,
      ARM64vecb_SQADD64x2,   ARM64vecb_SQADD32x4,
      ARM64vecb_SQADD16x8,   ARM64vecb_SQADD8x16,
      ARM64vecb_UQADD64x2,   ARM64vecb_UQADD32x4,
      ARM64vecb_UQADD16x8,   ARM64vecb_UQADD8x16,
      ARM64vecb_SQSUB64x2,   ARM64vecb_SQSUB32x4,
      ARM64vecb_SQSUB16x8,   ARM64vecb_SQSUB8x16,
      ARM64vecb_UQSUB64x2,   ARM64vecb_UQSUB32x4,
      ARM64vecb_UQSUB16x8,   ARM64vecb_UQSUB8x16,
      ARM64vecb_SQDMULL2DSS,
      ARM64vecb_SQDMULL4SHH,
      ARM64vecb_SQDMULH32x4,
      ARM64vecb_SQDMULH16x8,
      ARM64vecb_SQRDMULH32x4,
      ARM64vecb_SQRDMULH16x8,
      ARM64vecb_SQSHL64x2,   ARM64vecb_SQSHL32x4,
      ARM64vecb_SQSHL16x8,   ARM64vecb_SQSHL8x16,
      ARM64vecb_UQSHL64x2,   ARM64vecb_UQSHL32x4,
      ARM64vecb_UQSHL16x8,   ARM64vecb_UQSHL8x16,
      ARM64vecb_SQRSHL64x2,  ARM64vecb_SQRSHL32x4,
      ARM64vecb_SQRSHL16x8,  ARM64vecb_SQRSHL8x16,
      ARM64vecb_UQRSHL64x2,  ARM64vecb_UQRSHL32x4,
      ARM64vecb_UQRSHL16x8,  ARM64vecb_UQRSHL8x16,
      ARM64vecb_SSHL64x2,    ARM64vecb_SSHL32x4,
      ARM64vecb_SSHL16x8,    ARM64vecb_SSHL8x16,
      ARM64vecb_USHL64x2,    ARM64vecb_USHL32x4,
      ARM64vecb_USHL16x8,    ARM64vecb_USHL8x16,
      ARM64vecb_SRSHL64x2,   ARM64vecb_SRSHL32x4,
      ARM64vecb_SRSHL16x8,   ARM64vecb_SRSHL8x16,
      ARM64vecb_URSHL64x2,   ARM64vecb_URSHL32x4,
      ARM64vecb_URSHL16x8,   ARM64vecb_URSHL8x16,
      ARM64vecb_FRECPS64x2,  ARM64vecb_FRECPS32x4,
      ARM64vecb_FRSQRTS64x2, ARM64vecb_FRSQRTS32x4,
      ARM64vecmo_SUQADD64x2=300, ARM64vecmo_SUQADD32x4,
      ARM64vecmo_SUQADD16x8,     ARM64vecmo_SUQADD8x16,
      ARM64vecmo_USQADD64x2,     ARM64vecmo_USQADD32x4,
      ARM64vecmo_USQADD16x8,     ARM64vecmo_USQADD8x16,
      ARM64vecu_FNEG64x2=350, ARM64vecu_FNEG32x4,
      ARM64vecu_FABS64x2,     ARM64vecu_FABS32x4,
      ARM64vecu_ABS64x2,      ARM64vecu_ABS32x4,
      ARM64vecu_ABS16x8,      ARM64vecu_ABS8x16,
      ARM64vecu_CLS32x4,      ARM64vecu_CLS16x8,  ARM64vecu_CLS8x16,
      ARM64vecu_CLZ32x4,      ARM64vecu_CLZ16x8,  ARM64vecu_CLZ8x16,
      ARM64vecu_REV3216B,     ARM64vecu_REV328H,
      ARM64vecu_REV6416B,     ARM64vecu_REV648H,  ARM64vecu_REV644S,
      ARM64vecu_URECPE32x4,
      ARM64vecu_URSQRTE32x4,
      ARM64vecu_FRECPE64x2,   ARM64vecu_FRECPE32x4,
      ARM64vecu_FRSQRTE64x2,  ARM64vecu_FRSQRTE32x4,
      ARM64vecu_FSQRT64x2,    ARM64vecu_FSQRT32x4,
      ARM64vecshi_USHR64x2=400, ARM64vecshi_USHR32x4,
      ARM64vecshi_USHR16x8,     ARM64vecshi_USHR8x16,
      ARM64vecshi_SSHR64x2,     ARM64vecshi_SSHR32x4,
      ARM64vecshi_SSHR16x8,     ARM64vecshi_SSHR8x16,
      ARM64vecshi_SHL64x2,      ARM64vecshi_SHL32x4,
      ARM64vecshi_SHL16x8,      ARM64vecshi_SHL8x16,
      /* These narrowing shifts zero out the top half of the destination
         register. */
      ARM64vecshi_SQSHRN2SD,   ARM64vecshi_SQSHRN4HS,   ARM64vecshi_SQSHRN8BH,
      ARM64vecshi_UQSHRN2SD,   ARM64vecshi_UQSHRN4HS,   ARM64vecshi_UQSHRN8BH,
      ARM64vecshi_SQSHRUN2SD,  ARM64vecshi_SQSHRUN4HS,  ARM64vecshi_SQSHRUN8BH,
      ARM64vecshi_SQRSHRN2SD,  ARM64vecshi_SQRSHRN4HS,  ARM64vecshi_SQRSHRN8BH,
      ARM64vecshi_UQRSHRN2SD,  ARM64vecshi_UQRSHRN4HS,  ARM64vecshi_UQRSHRN8BH,
      ARM64vecshi_SQRSHRUN2SD, ARM64vecshi_SQRSHRUN4HS, ARM64vecshi_SQRSHRUN8BH,
      /* Saturating left shifts, of various flavours. */
      ARM64vecshi_UQSHL64x2,   ARM64vecshi_UQSHL32x4,
      ARM64vecshi_UQSHL16x8,   ARM64vecshi_UQSHL8x16,
      ARM64vecshi_SQSHL64x2,   ARM64vecshi_SQSHL32x4,
      ARM64vecshi_SQSHL16x8,   ARM64vecshi_SQSHL8x16,
      ARM64vecshi_SQSHLU64x2,  ARM64vecshi_SQSHLU32x4,
      ARM64vecshi_SQSHLU16x8,  ARM64vecshi_SQSHLU8x16,
      ARM64in_MovI,        /* int reg-reg move */
      ARM64in_LdSt32,      /* w/ ZX loads */
      ARM64in_LdSt16,      /* w/ ZX loads */
      ARM64in_LdSt8,       /* w/ ZX loads */
      ARM64in_XDirect,     /* direct transfer to GA */
      ARM64in_XIndir,      /* indirect transfer to GA */
      ARM64in_XAssisted,   /* assisted transfer to GA */
      ARM64in_AddToSP,     /* move SP by small, signed constant */
      ARM64in_FromSP,      /* move SP to integer register */
      /* ARM64in_V*: scalar ops involving vector registers */
      ARM64in_VLdStH,      /* ld/st to/from low 16 bits of vec reg, imm offset */
      ARM64in_VLdStS,      /* ld/st to/from low 32 bits of vec reg, imm offset */
      ARM64in_VLdStD,      /* ld/st to/from low 64 bits of vec reg, imm offset */
      ARM64in_VLdStQ,      /* ld/st to/from all 128 bits of vec reg, no offset */
      ARM64in_VCvtSD,      /* scalar 32 bit FP <--> 64 bit FP */
      ARM64in_VCvtHS,      /* scalar 16 bit FP <--> 32 bit FP */
      ARM64in_VCvtHD,      /* scalar 16 bit FP <--> 64 bit FP */
      /* ARM64in_V*V: vector ops on vector registers */
      ARM64in_VDfromX,     /* Move an Xreg to a Dreg */
      ARM64in_VQfromX,     /* Move an Xreg to a Qreg lo64, and zero hi64 */
      ARM64in_VQfromXX,    /* Move 2 Xregs to a Qreg */
      ARM64in_VXfromQ,     /* Move half a Qreg to an Xreg */
      ARM64in_VXfromDorS,  /* Move Dreg or Sreg(ZX) to an Xreg */
      ARM64in_VMov,        /* vector reg-reg move, 16, 8 or 4 bytes */
      ARM64in_EvCheck,     /* Event check */
      ARM64in_ProfInc      /* 64-bit profile counter increment */
   /* Destinations are on the LEFT (first operand) */

         /* --- INTEGER INSTRUCTIONS --- */
         /* 64 bit ADD/SUB reg, reg or uimm12<<{0,12} */
         /* 64 or 32 bit CMP reg, reg or aimm (SUB and set flags) */
         /* 64 bit AND/OR/XOR reg, reg or bitfield-immediate */
         /* 64 bit TST reg, reg or bimm (AND and set flags) */
         /* 64 bit SHL/SHR/SAR, 2nd arg is reg or imm */
         /* NOT/NEG/CLZ, 64 bit only */
         /* MOV dst, src -- reg-reg move for integer registers */
         /* Pseudo-insn; make a 64-bit immediate */
         /* 64-bit load or store */
         /* zx-32-to-64-bit load, or 32-bit store */
         /* zx-16-to-64-bit load, or 16-bit store */
         /* zx-8-to-64-bit load, or 8-bit store */
         /* Update the guest PC value, then exit requesting to chain
            to it.  May be conditional.  Urr, use of Addr64 implicitly
            assumes that wordsize(guest) == wordsize(host). */
            Addr64        dstGA;    /* next guest address */
            ARM64AMode*   amPC;     /* amode in guest state for PC */
            ARM64CondCode cond;     /* can be ARM64cc_AL */
            Bool          toFastEP; /* chain to the slow or fast point? */
         /* Boring transfer to a guest address not known at JIT time.
            Not chainable.  May be conditional. */
            ARM64CondCode cond; /* can be ARM64cc_AL */
         /* Assisted transfer to a guest address, most general case.
            Not chainable.  May be conditional. */
            ARM64CondCode cond; /* can be ARM64cc_AL */
         /* CSEL: dst = if cond then argL else argR.  cond may be anything. */
         /* Pseudo-insn.  Call target (an absolute address), on given
            condition (which could be ARM64cc_AL). */
            RetLoc rloc;     /* where the return value will be */
            Int    nArgRegs; /* # regs carrying args: 0 .. 8 */
         /* move SP by small, signed constant */
            Int simm; /* needs to be 0 % 16 and in the range -4095
                         .. 4095 inclusive */
         /* move SP to integer register */
         /* Integer multiply, with 3 variants:
              (PLAIN) lo64(64 * 64)
              (ZX)    hi64(64 *u 64)
              (SX)    hi64(64 *s 64)  */
         /* LDXR{,H,B} x2, [x4] */
            Int szB; /* 1, 2, 4 or 8 */
         /* STXR{,H,B} w0, x2, [x4] */
            Int szB; /* 1, 2, 4 or 8 */
         /* x1 = CAS(x3(addr), x5(expected) -> x7(new)),
            where x1[8*szB-1 : 0] == x5[8*szB-1 : 0] indicates success,
                  x1[8*szB-1 : 0] != x5[8*szB-1 : 0] indicates failure.
            Uses x8 as scratch (but that's not allocatable).
            Hence: RD x3, x5, x7; WR x1

            (szB=4)  and   x8, x5, #0xFFFFFFFF
            (szB=2)  and   x8, x5, #0xFFFF
            (szB=1)  and   x8, x5, #0xFF
            -- x8 is correctly zero-extended expected value
            -- x1 is correctly zero-extended actual value
            -- if branch taken, failure; x1[8*szB-1 : 0] holds old value
            -- if store successful, x1==0, so the eor is "x1 := x5"
            -- if store failed, x1==1, so the eor makes x1 != x5
         */
            Int szB; /* 1, 2, 4 or 8 */
         /* Mem fence.  An insn which fences all loads and stores as
            much as possible before continuing.  On ARM64 we emit the
            sequence "dsb sy ; dmb sy ; isb sy", which is probably
            total nuclear overkill, but better safe than sorry. */
         /* A CLREX instruction. */
         /* --- INSTRUCTIONS INVOLVING VECTOR REGISTERS --- */
         /* ld/st to/from low 16 bits of vec reg, imm offset */
            UInt uimm12; /* 0 .. 8190 inclusive, 0 % 2 */
         /* ld/st to/from low 32 bits of vec reg, imm offset */
            UInt uimm12; /* 0 .. 16380 inclusive, 0 % 4 */
         /* ld/st to/from low 64 bits of vec reg, imm offset */
            UInt uimm12; /* 0 .. 32760 inclusive, 0 % 8 */
         /* ld/st to/from all 128 bits of vec reg, no offset */
         /* Scalar conversion of int to float. */
            HReg rD; // dst, a D or S register
            HReg rS; // src, a W or X register
         /* Scalar conversion of float to int, w/ specified RM. */
            HReg  rD;    // dst, a W or X register
            HReg  rS;    // src, a D or S register
            UChar armRM; // ARM encoded RM:
                         // 00=nearest, 01=+inf, 10=-inf, 11=zero
         /* Convert between 32-bit and 64-bit FP values (both ways). (FCVT) */
            Bool sToD; /* True: F32->F64.  False: F64->F32 */
         /* Convert between 16-bit and 32-bit FP values (both ways). (FCVT) */
            Bool hToS; /* True: F16->F32.  False: F32->F16 */
         /* Convert between 16-bit and 64-bit FP values (both ways). (FCVT) */
            Bool hToD; /* True: F16->F64.  False: F64->F16 */
         /* 64-bit FP unary */
         /* 32-bit FP unary */
         /* 64-bit FP binary arithmetic */
         /* 32-bit FP binary arithmetic */
         /* 64-bit FP compare */
         /* 32-bit FP compare */
         /* 32- or 64-bit FP conditional select */
         /* Move a 32-bit value to/from the FPCR */
         /* Move a 32-bit value to/from the FPSR */
         /* binary vector operation on vector registers */
         /* binary vector operation on vector registers.
            Dst reg is also a src. */
         /* unary vector operation on vector registers */
         /* vector narrowing, Q -> Q.  Result goes in the bottom half
            of dst and the top half is zeroed out.  Iow one of the
            XTN family of narrowing operations. */
            UInt dszBlg2; // 0: 16to8_x8  1: 32to16_x4  2: 64to32_x2
         /* Vector shift by immediate.  For left shifts, |amt| must be
            >= 0 and < implied lane size of |op|.  For right shifts,
            |amt| must be > 0 and <= implied lane size of |op|.  Shifts
            beyond these ranges are not allowed. */
            ARM64VecShiftImmOp op;
            UShort imm;   /* Same 1-bit-per-byte encoding as IR */
            UInt laneNo;  /* either 0 or 1 */
         /* MOV dst, src -- reg-reg move for vector registers */
            UInt szB;     // 16=mov qD,qS;  8=mov dD,dS;  4=mov sD,sS
            ARM64AMode* amCounter;
            ARM64AMode* amFailAddr;
         /* No fields.  The address of the counter to inc is
            installed later, post-translation, by patching it in,
            as it is not known at translation time. */
extern ARM64Instr* ARM64Instr_Arith   ( HReg, HReg, ARM64RIA*, Bool isAdd );
extern ARM64Instr* ARM64Instr_Cmp     ( HReg, ARM64RIA*, Bool is64 );
extern ARM64Instr* ARM64Instr_Logic   ( HReg, HReg, ARM64RIL*, ARM64LogicOp );
extern ARM64Instr* ARM64Instr_Test    ( HReg, ARM64RIL* );
extern ARM64Instr* ARM64Instr_Shift   ( HReg, HReg, ARM64RI6*, ARM64ShiftOp );
extern ARM64Instr* ARM64Instr_Unary   ( HReg, HReg, ARM64UnaryOp );
extern ARM64Instr* ARM64Instr_MovI    ( HReg, HReg );
extern ARM64Instr* ARM64Instr_Imm64   ( HReg, ULong );
extern ARM64Instr* ARM64Instr_LdSt64  ( Bool isLoad, HReg, ARM64AMode* );
extern ARM64Instr* ARM64Instr_LdSt32  ( Bool isLoad, HReg, ARM64AMode* );
extern ARM64Instr* ARM64Instr_LdSt16  ( Bool isLoad, HReg, ARM64AMode* );
extern ARM64Instr* ARM64Instr_LdSt8   ( Bool isLoad, HReg, ARM64AMode* );
extern ARM64Instr* ARM64Instr_XDirect ( Addr64 dstGA, ARM64AMode* amPC,
                                        ARM64CondCode cond, Bool toFastEP );
extern ARM64Instr* ARM64Instr_XIndir  ( HReg dstGA, ARM64AMode* amPC,
                                        ARM64CondCode cond );
extern ARM64Instr* ARM64Instr_XAssisted ( HReg dstGA, ARM64AMode* amPC,
                                          ARM64CondCode cond, IRJumpKind jk );
extern ARM64Instr* ARM64Instr_CSel    ( HReg dst, HReg argL, HReg argR,
                                        ARM64CondCode cond );
extern ARM64Instr* ARM64Instr_Call    ( ARM64CondCode, Addr64, Int nArgRegs,
                                        RetLoc rloc );
extern ARM64Instr* ARM64Instr_AddToSP ( Int simm );
extern ARM64Instr* ARM64Instr_FromSP  ( HReg dst );
extern ARM64Instr* ARM64Instr_Mul     ( HReg dst, HReg argL, HReg argR,
                                        ARM64MulOp op );
extern ARM64Instr* ARM64Instr_LdrEX   ( Int szB );
extern ARM64Instr* ARM64Instr_StrEX   ( Int szB );
extern ARM64Instr* ARM64Instr_CAS     ( Int szB );
extern ARM64Instr* ARM64Instr_MFence  ( void );
extern ARM64Instr* ARM64Instr_ClrEX   ( void );
extern ARM64Instr* ARM64Instr_VLdStH  ( Bool isLoad, HReg sD, HReg rN,
                                        UInt uimm12 /* 0 .. 8190, 0 % 2 */ );
extern ARM64Instr* ARM64Instr_VLdStS  ( Bool isLoad, HReg sD, HReg rN,
                                        UInt uimm12 /* 0 .. 16380, 0 % 4 */ );
extern ARM64Instr* ARM64Instr_VLdStD  ( Bool isLoad, HReg dD, HReg rN,
                                        UInt uimm12 /* 0 .. 32760, 0 % 8 */ );
extern ARM64Instr* ARM64Instr_VLdStQ  ( Bool isLoad, HReg rQ, HReg rN );
extern ARM64Instr* ARM64Instr_VCvtI2F ( ARM64CvtOp how, HReg rD, HReg rS );
extern ARM64Instr* ARM64Instr_VCvtF2I ( ARM64CvtOp how, HReg rD, HReg rS,
                                        UChar armRM );
extern ARM64Instr* ARM64Instr_VCvtSD  ( Bool sToD, HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VCvtHS  ( Bool hToS, HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VCvtHD  ( Bool hToD, HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VUnaryD ( ARM64FpUnaryOp op, HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VUnaryS ( ARM64FpUnaryOp op, HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VBinD   ( ARM64FpBinOp op, HReg, HReg, HReg );
extern ARM64Instr* ARM64Instr_VBinS   ( ARM64FpBinOp op, HReg, HReg, HReg );
extern ARM64Instr* ARM64Instr_VCmpD   ( HReg argL, HReg argR );
extern ARM64Instr* ARM64Instr_VCmpS   ( HReg argL, HReg argR );
extern ARM64Instr* ARM64Instr_VFCSel  ( HReg dst, HReg argL, HReg argR,
                                        ARM64CondCode cond, Bool isD );
extern ARM64Instr* ARM64Instr_FPCR    ( Bool toFPCR, HReg iReg );
extern ARM64Instr* ARM64Instr_FPSR    ( Bool toFPSR, HReg iReg );
extern ARM64Instr* ARM64Instr_VBinV   ( ARM64VecBinOp op, HReg, HReg, HReg );
extern ARM64Instr* ARM64Instr_VModifyV ( ARM64VecModifyOp, HReg, HReg );
extern ARM64Instr* ARM64Instr_VUnaryV ( ARM64VecUnaryOp op, HReg, HReg );
extern ARM64Instr* ARM64Instr_VNarrowV ( ARM64VecNarrowOp op, UInt dszBlg2,
                                         HReg dst, HReg src );
extern ARM64Instr* ARM64Instr_VShiftImmV ( ARM64VecShiftImmOp op,
                                           HReg dst, HReg src, UInt amt );
extern ARM64Instr* ARM64Instr_VExtV   ( HReg dst,
                                        HReg srcLo, HReg srcHi, UInt amtB );
extern ARM64Instr* ARM64Instr_VImmQ   ( HReg, UShort );
extern ARM64Instr* ARM64Instr_VDfromX ( HReg rD, HReg rX );
extern ARM64Instr* ARM64Instr_VQfromX ( HReg rQ, HReg rXlo );
extern ARM64Instr* ARM64Instr_VQfromXX( HReg rQ, HReg rXhi, HReg rXlo );
extern ARM64Instr* ARM64Instr_VXfromQ ( HReg rX, HReg rQ, UInt laneNo );
extern ARM64Instr* ARM64Instr_VXfromDorS ( HReg rX, HReg rDorS, Bool fromD );
extern ARM64Instr* ARM64Instr_VMov    ( UInt szB, HReg dst, HReg src );

extern ARM64Instr* ARM64Instr_EvCheck ( ARM64AMode* amCounter,
                                        ARM64AMode* amFailAddr );
extern ARM64Instr* ARM64Instr_ProfInc ( void );

extern void ppARM64Instr ( const ARM64Instr* );
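
/* Illustrative only: a hedged sketch of how instruction selection
   typically uses the constructors above.  The 'env' and 'addInstr'
   names are assumptions borrowed from the style of other VEX backends,
   not declarations from this header:

      // dst := 0x1000 ; dst := dst + 42   (42 fits in uimm12, shift 0)
      addInstr(env, ARM64Instr_Imm64(dst, 0x1000));
      addInstr(env, ARM64Instr_Arith(dst, dst,
                                     ARM64RIA_I12(42, 0), True/*isAdd*/));
*/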
/* Some functions that insulate the register allocator from details
   of the underlying instruction set. */
extern void getRegUsage_ARM64Instr ( HRegUsage*, const ARM64Instr*, Bool );
extern void mapRegs_ARM64Instr     ( HRegRemap*, ARM64Instr*, Bool );
extern Int  emit_ARM64Instr ( /*MB_MOD*/Bool* is_profInc,
                              UChar* buf, Int nbuf, const ARM64Instr* i,
                              Bool mode64,
                              VexEndness endness_host,
                              const void* disp_cp_chain_me_to_slowEP,
                              const void* disp_cp_chain_me_to_fastEP,
                              const void* disp_cp_xindir,
                              const void* disp_cp_xassisted );
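
/* Illustrative only: a hedged sketch of emitting one instruction into a
   code buffer.  The buffer size, endianness and dispatcher pointers are
   arbitrary example values, not requirements from this header:

      UChar buf[128];
      Bool  isProfInc = False;
      Int   used = emit_ARM64Instr( &isProfInc, buf, sizeof(buf), i,
                                    True/*mode64*/, VexEndnessLE,
                                    disp_chain_slow, disp_chain_fast,
                                    disp_xindir, disp_xassisted );
      // 'used' is the number of bytes of code produced for 'i'
*/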
extern void genSpill_ARM64  ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern void genReload_ARM64 ( /*OUT*/HInstr** i1, /*OUT*/HInstr** i2,
                              HReg rreg, Int offset, Bool );
extern ARM64Instr* genMove_ARM64 ( HReg from, HReg to, Bool );
extern const RRegUniverse* getRRegUniverse_ARM64 ( void );

extern HInstrArray* iselSB_ARM64 ( const IRSB*,
                                   VexArch,
                                   const VexArchInfo*,
                                   const VexAbiInfo*,
                                   Int offs_Host_EvC_Counter,
                                   Int offs_Host_EvC_FailAddr,
                                   Bool chainingAllowed,
                                   Bool addProfInc,
                                   Addr max_ga );
/* How big is an event check?  This is kind of a kludge because it
   depends on the offsets of host_EvC_FAILADDR and
   host_EvC_COUNTER. */
extern Int evCheckSzB_ARM64 (void);
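
/* Illustrative only: a hedged sketch of how the event-check pieces fit
   together.  The offsets are stand-ins for the real guest-state offsets,
   and x21 is assumed (as elsewhere in this backend) to hold the guest
   state pointer:

      ARM64AMode* amCounter  = ARM64AMode_RI9(hregARM64_X21(), offs_Counter);
      ARM64AMode* amFailAddr = ARM64AMode_RI9(hregARM64_X21(), offs_FailAddr);
      addInstr(env, ARM64Instr_EvCheck(amCounter, amFailAddr));
      Int szB = evCheckSzB_ARM64();   // fixed size, in bytes, of the
                                      // code the check will occupy
*/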
/* Perform a chaining and unchaining of an XDirect jump. */
extern VexInvalRange chainXDirect_ARM64 ( VexEndness endness_host,
                                          void* place_to_chain,
                                          const void* disp_cp_chain_me_EXPECTED,
                                          const void* place_to_jump_to );

extern VexInvalRange unchainXDirect_ARM64 ( VexEndness endness_host,
                                            void* place_to_unchain,
                                            const void* place_to_jump_to_EXPECTED,
                                            const void* disp_cp_chain_me );
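
/* Illustrative only: a hedged sketch of the chaining life cycle.  An
   XDirect exit initially calls the dispatcher's chain-me stub; once the
   destination has its own translation the exit can be patched to jump
   straight there, and later un-patched again.  exit_point, new_code and
   disp_cp_chain_me are example names, not part of this interface:

      VexInvalRange vir1 = chainXDirect_ARM64  ( VexEndnessLE, exit_point,
                                                 disp_cp_chain_me, new_code );
      VexInvalRange vir2 = unchainXDirect_ARM64( VexEndnessLE, exit_point,
                                                 new_code, disp_cp_chain_me );
      // each VexInvalRange describes the byte range that must be
      // instruction-cache invalidated after patching
*/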
/* Patch the counter location into an existing ProfInc point. */
extern VexInvalRange patchProfInc_ARM64 ( VexEndness endness_host,
                                          void* place_to_patch,
                                          const ULong* location_of_counter );
#endif /* ndef __VEX_HOST_ARM64_DEFS_H */


/*---------------------------------------------------------------*/
/*--- end                                   host_arm64_defs.h ---*/
/*---------------------------------------------------------------*/