2 /*--------------------------------------------------------------------*/
3 /*--- begin guest_mips_toIR.c ---*/
4 /*--------------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
28 /* Translates MIPS code to IR. */
30 #include "libvex_basictypes.h"
31 #include "libvex_ir.h"
33 #include "libvex_guest_mips32.h"
34 #include "libvex_guest_mips64.h"
36 #include "main_util.h"
37 #include "main_globals.h"
38 #include "guest_generic_bb_to_IR.h"
39 #include "guest_mips_defs.h"
40 #include "mips_defs.h"
42 /*------------------------------------------------------------*/
44 /*------------------------------------------------------------*/
46 /* These are set at the start of the translation of a instruction, so
47 that we don't have to pass them around endlessly. CONST means does
48 not change during translation of the instruction. */
50 /* CONST: what is the host's endianness? This has to do with float vs
51 double register accesses on VFP, but it's complex and not properly
53 static VexEndness host_endness
;
55 /* Pointer to the guest code area. */
56 const UChar
*guest_code
;
58 /* CONST: The guest address for the instruction currently being
60 #if defined(VGP_mips32_linux)
61 static Addr32 guest_PC_curr_instr
;
63 static Addr64 guest_PC_curr_instr
;
66 /* MOD: The IRSB* into which we're generating code. */
69 /* Is our guest binary 32 or 64bit? Set at each call to
70 disInstr_MIPS below. */
73 /* CPU has FPU and 32 dbl. prec. FP registers. */
74 static Bool fp_mode64
= False
;
76 /* FPU works in FRE mode */
77 static Bool fp_mode64_fre
= False
;
79 /* CPU has MSA unit */
80 static Bool has_msa
= False
;
82 /* Define 1.0 in single and double precision. */
83 #define ONE_SINGLE 0x3F800000
84 #define ONE_DOUBLE 0x3FF0000000000000ULL
86 /*------------------------------------------------------------*/
87 /*--- Helper bits and pieces for deconstructing the ---*/
88 /*--- mips insn stream. ---*/
89 /*------------------------------------------------------------*/
91 /* ---------------- Integer registers ---------------- */
93 static UInt
integerGuestRegOffset(UInt iregNo
)
95 /* Do we care about endianness here? We do if sub-parts of integer
96 registers are accessed, but I don't think that ever happens on
103 ret
= offsetof(VexGuestMIPS32State
, guest_r0
);
107 ret
= offsetof(VexGuestMIPS32State
, guest_r1
);
111 ret
= offsetof(VexGuestMIPS32State
, guest_r2
);
115 ret
= offsetof(VexGuestMIPS32State
, guest_r3
);
119 ret
= offsetof(VexGuestMIPS32State
, guest_r4
);
123 ret
= offsetof(VexGuestMIPS32State
, guest_r5
);
127 ret
= offsetof(VexGuestMIPS32State
, guest_r6
);
131 ret
= offsetof(VexGuestMIPS32State
, guest_r7
);
135 ret
= offsetof(VexGuestMIPS32State
, guest_r8
);
139 ret
= offsetof(VexGuestMIPS32State
, guest_r9
);
143 ret
= offsetof(VexGuestMIPS32State
, guest_r10
);
147 ret
= offsetof(VexGuestMIPS32State
, guest_r11
);
151 ret
= offsetof(VexGuestMIPS32State
, guest_r12
);
155 ret
= offsetof(VexGuestMIPS32State
, guest_r13
);
159 ret
= offsetof(VexGuestMIPS32State
, guest_r14
);
163 ret
= offsetof(VexGuestMIPS32State
, guest_r15
);
167 ret
= offsetof(VexGuestMIPS32State
, guest_r16
);
171 ret
= offsetof(VexGuestMIPS32State
, guest_r17
);
175 ret
= offsetof(VexGuestMIPS32State
, guest_r18
);
179 ret
= offsetof(VexGuestMIPS32State
, guest_r19
);
183 ret
= offsetof(VexGuestMIPS32State
, guest_r20
);
187 ret
= offsetof(VexGuestMIPS32State
, guest_r21
);
191 ret
= offsetof(VexGuestMIPS32State
, guest_r22
);
195 ret
= offsetof(VexGuestMIPS32State
, guest_r23
);
199 ret
= offsetof(VexGuestMIPS32State
, guest_r24
);
203 ret
= offsetof(VexGuestMIPS32State
, guest_r25
);
207 ret
= offsetof(VexGuestMIPS32State
, guest_r26
);
211 ret
= offsetof(VexGuestMIPS32State
, guest_r27
);
215 ret
= offsetof(VexGuestMIPS32State
, guest_r28
);
219 ret
= offsetof(VexGuestMIPS32State
, guest_r29
);
223 ret
= offsetof(VexGuestMIPS32State
, guest_r30
);
227 ret
= offsetof(VexGuestMIPS32State
, guest_r31
);
237 ret
= offsetof(VexGuestMIPS64State
, guest_r0
);
241 ret
= offsetof(VexGuestMIPS64State
, guest_r1
);
245 ret
= offsetof(VexGuestMIPS64State
, guest_r2
);
249 ret
= offsetof(VexGuestMIPS64State
, guest_r3
);
253 ret
= offsetof(VexGuestMIPS64State
, guest_r4
);
257 ret
= offsetof(VexGuestMIPS64State
, guest_r5
);
261 ret
= offsetof(VexGuestMIPS64State
, guest_r6
);
265 ret
= offsetof(VexGuestMIPS64State
, guest_r7
);
269 ret
= offsetof(VexGuestMIPS64State
, guest_r8
);
273 ret
= offsetof(VexGuestMIPS64State
, guest_r9
);
277 ret
= offsetof(VexGuestMIPS64State
, guest_r10
);
281 ret
= offsetof(VexGuestMIPS64State
, guest_r11
);
285 ret
= offsetof(VexGuestMIPS64State
, guest_r12
);
289 ret
= offsetof(VexGuestMIPS64State
, guest_r13
);
293 ret
= offsetof(VexGuestMIPS64State
, guest_r14
);
297 ret
= offsetof(VexGuestMIPS64State
, guest_r15
);
301 ret
= offsetof(VexGuestMIPS64State
, guest_r16
);
305 ret
= offsetof(VexGuestMIPS64State
, guest_r17
);
309 ret
= offsetof(VexGuestMIPS64State
, guest_r18
);
313 ret
= offsetof(VexGuestMIPS64State
, guest_r19
);
317 ret
= offsetof(VexGuestMIPS64State
, guest_r20
);
321 ret
= offsetof(VexGuestMIPS64State
, guest_r21
);
325 ret
= offsetof(VexGuestMIPS64State
, guest_r22
);
329 ret
= offsetof(VexGuestMIPS64State
, guest_r23
);
333 ret
= offsetof(VexGuestMIPS64State
, guest_r24
);
337 ret
= offsetof(VexGuestMIPS64State
, guest_r25
);
341 ret
= offsetof(VexGuestMIPS64State
, guest_r26
);
345 ret
= offsetof(VexGuestMIPS64State
, guest_r27
);
349 ret
= offsetof(VexGuestMIPS64State
, guest_r28
);
353 ret
= offsetof(VexGuestMIPS64State
, guest_r29
);
357 ret
= offsetof(VexGuestMIPS64State
, guest_r30
);
361 ret
= offsetof(VexGuestMIPS64State
, guest_r31
);
/* Byte offset of the guest program counter in the guest state. */
#if defined(VGP_mips32_linux)
#define OFFB_PC offsetof(VexGuestMIPS32State, guest_PC)
#else
#define OFFB_PC offsetof(VexGuestMIPS64State, guest_PC)
#endif
378 /* ---------------- Floating point registers ---------------- */
380 static UInt
floatGuestRegOffset(UInt fregNo
)
382 vassert(fregNo
< 32);
388 ret
= offsetof(VexGuestMIPS32State
, guest_f0
);
392 ret
= offsetof(VexGuestMIPS32State
, guest_f1
);
396 ret
= offsetof(VexGuestMIPS32State
, guest_f2
);
400 ret
= offsetof(VexGuestMIPS32State
, guest_f3
);
404 ret
= offsetof(VexGuestMIPS32State
, guest_f4
);
408 ret
= offsetof(VexGuestMIPS32State
, guest_f5
);
412 ret
= offsetof(VexGuestMIPS32State
, guest_f6
);
416 ret
= offsetof(VexGuestMIPS32State
, guest_f7
);
420 ret
= offsetof(VexGuestMIPS32State
, guest_f8
);
424 ret
= offsetof(VexGuestMIPS32State
, guest_f9
);
428 ret
= offsetof(VexGuestMIPS32State
, guest_f10
);
432 ret
= offsetof(VexGuestMIPS32State
, guest_f11
);
436 ret
= offsetof(VexGuestMIPS32State
, guest_f12
);
440 ret
= offsetof(VexGuestMIPS32State
, guest_f13
);
444 ret
= offsetof(VexGuestMIPS32State
, guest_f14
);
448 ret
= offsetof(VexGuestMIPS32State
, guest_f15
);
452 ret
= offsetof(VexGuestMIPS32State
, guest_f16
);
456 ret
= offsetof(VexGuestMIPS32State
, guest_f17
);
460 ret
= offsetof(VexGuestMIPS32State
, guest_f18
);
464 ret
= offsetof(VexGuestMIPS32State
, guest_f19
);
468 ret
= offsetof(VexGuestMIPS32State
, guest_f20
);
472 ret
= offsetof(VexGuestMIPS32State
, guest_f21
);
476 ret
= offsetof(VexGuestMIPS32State
, guest_f22
);
480 ret
= offsetof(VexGuestMIPS32State
, guest_f23
);
484 ret
= offsetof(VexGuestMIPS32State
, guest_f24
);
488 ret
= offsetof(VexGuestMIPS32State
, guest_f25
);
492 ret
= offsetof(VexGuestMIPS32State
, guest_f26
);
496 ret
= offsetof(VexGuestMIPS32State
, guest_f27
);
500 ret
= offsetof(VexGuestMIPS32State
, guest_f28
);
504 ret
= offsetof(VexGuestMIPS32State
, guest_f29
);
508 ret
= offsetof(VexGuestMIPS32State
, guest_f30
);
512 ret
= offsetof(VexGuestMIPS32State
, guest_f31
);
522 ret
= offsetof(VexGuestMIPS64State
, guest_f0
);
526 ret
= offsetof(VexGuestMIPS64State
, guest_f1
);
530 ret
= offsetof(VexGuestMIPS64State
, guest_f2
);
534 ret
= offsetof(VexGuestMIPS64State
, guest_f3
);
538 ret
= offsetof(VexGuestMIPS64State
, guest_f4
);
542 ret
= offsetof(VexGuestMIPS64State
, guest_f5
);
546 ret
= offsetof(VexGuestMIPS64State
, guest_f6
);
550 ret
= offsetof(VexGuestMIPS64State
, guest_f7
);
554 ret
= offsetof(VexGuestMIPS64State
, guest_f8
);
558 ret
= offsetof(VexGuestMIPS64State
, guest_f9
);
562 ret
= offsetof(VexGuestMIPS64State
, guest_f10
);
566 ret
= offsetof(VexGuestMIPS64State
, guest_f11
);
570 ret
= offsetof(VexGuestMIPS64State
, guest_f12
);
574 ret
= offsetof(VexGuestMIPS64State
, guest_f13
);
578 ret
= offsetof(VexGuestMIPS64State
, guest_f14
);
582 ret
= offsetof(VexGuestMIPS64State
, guest_f15
);
586 ret
= offsetof(VexGuestMIPS64State
, guest_f16
);
590 ret
= offsetof(VexGuestMIPS64State
, guest_f17
);
594 ret
= offsetof(VexGuestMIPS64State
, guest_f18
);
598 ret
= offsetof(VexGuestMIPS64State
, guest_f19
);
602 ret
= offsetof(VexGuestMIPS64State
, guest_f20
);
606 ret
= offsetof(VexGuestMIPS64State
, guest_f21
);
610 ret
= offsetof(VexGuestMIPS64State
, guest_f22
);
614 ret
= offsetof(VexGuestMIPS64State
, guest_f23
);
618 ret
= offsetof(VexGuestMIPS64State
, guest_f24
);
622 ret
= offsetof(VexGuestMIPS64State
, guest_f25
);
626 ret
= offsetof(VexGuestMIPS64State
, guest_f26
);
630 ret
= offsetof(VexGuestMIPS64State
, guest_f27
);
634 ret
= offsetof(VexGuestMIPS64State
, guest_f28
);
638 ret
= offsetof(VexGuestMIPS64State
, guest_f29
);
642 ret
= offsetof(VexGuestMIPS64State
, guest_f30
);
646 ret
= offsetof(VexGuestMIPS64State
, guest_f31
);
657 /* ---------------- MIPS32 DSP ASE(r2) accumulators ---------------- */
659 UInt
accumulatorGuestRegOffset(UInt acNo
)
667 ret
= offsetof(VexGuestMIPS32State
, guest_ac0
);
671 ret
= offsetof(VexGuestMIPS32State
, guest_ac1
);
675 ret
= offsetof(VexGuestMIPS32State
, guest_ac2
);
679 ret
= offsetof(VexGuestMIPS32State
, guest_ac3
);
690 /* ---------------- MIPS32 MSA registers ---------------- */
692 static UInt
msaGuestRegOffset(UInt msaRegNo
)
694 vassert(msaRegNo
<= 31);
700 ret
= offsetof(VexGuestMIPS64State
, guest_w0
);
704 ret
= offsetof(VexGuestMIPS64State
, guest_w1
);
708 ret
= offsetof(VexGuestMIPS64State
, guest_w2
);
712 ret
= offsetof(VexGuestMIPS64State
, guest_w3
);
716 ret
= offsetof(VexGuestMIPS64State
, guest_w4
);
720 ret
= offsetof(VexGuestMIPS64State
, guest_w5
);
724 ret
= offsetof(VexGuestMIPS64State
, guest_w6
);
728 ret
= offsetof(VexGuestMIPS64State
, guest_w7
);
732 ret
= offsetof(VexGuestMIPS64State
, guest_w8
);
736 ret
= offsetof(VexGuestMIPS64State
, guest_w9
);
740 ret
= offsetof(VexGuestMIPS64State
, guest_w10
);
744 ret
= offsetof(VexGuestMIPS64State
, guest_w11
);
748 ret
= offsetof(VexGuestMIPS64State
, guest_w12
);
752 ret
= offsetof(VexGuestMIPS64State
, guest_w13
);
756 ret
= offsetof(VexGuestMIPS64State
, guest_w14
);
760 ret
= offsetof(VexGuestMIPS64State
, guest_w15
);
764 ret
= offsetof(VexGuestMIPS64State
, guest_w16
);
768 ret
= offsetof(VexGuestMIPS64State
, guest_w17
);
772 ret
= offsetof(VexGuestMIPS64State
, guest_w18
);
776 ret
= offsetof(VexGuestMIPS64State
, guest_w19
);
780 ret
= offsetof(VexGuestMIPS64State
, guest_w20
);
784 ret
= offsetof(VexGuestMIPS64State
, guest_w21
);
788 ret
= offsetof(VexGuestMIPS64State
, guest_w22
);
792 ret
= offsetof(VexGuestMIPS64State
, guest_w23
);
796 ret
= offsetof(VexGuestMIPS64State
, guest_w24
);
800 ret
= offsetof(VexGuestMIPS64State
, guest_w25
);
804 ret
= offsetof(VexGuestMIPS64State
, guest_w26
);
808 ret
= offsetof(VexGuestMIPS64State
, guest_w27
);
812 ret
= offsetof(VexGuestMIPS64State
, guest_w28
);
816 ret
= offsetof(VexGuestMIPS64State
, guest_w29
);
820 ret
= offsetof(VexGuestMIPS64State
, guest_w30
);
824 ret
= offsetof(VexGuestMIPS64State
, guest_w31
);
834 ret
= offsetof(VexGuestMIPS32State
, guest_w0
);
838 ret
= offsetof(VexGuestMIPS32State
, guest_w1
);
842 ret
= offsetof(VexGuestMIPS32State
, guest_w2
);
846 ret
= offsetof(VexGuestMIPS32State
, guest_w3
);
850 ret
= offsetof(VexGuestMIPS32State
, guest_w4
);
854 ret
= offsetof(VexGuestMIPS32State
, guest_w5
);
858 ret
= offsetof(VexGuestMIPS32State
, guest_w6
);
862 ret
= offsetof(VexGuestMIPS32State
, guest_w7
);
866 ret
= offsetof(VexGuestMIPS32State
, guest_w8
);
870 ret
= offsetof(VexGuestMIPS32State
, guest_w9
);
874 ret
= offsetof(VexGuestMIPS32State
, guest_w10
);
878 ret
= offsetof(VexGuestMIPS32State
, guest_w11
);
882 ret
= offsetof(VexGuestMIPS32State
, guest_w12
);
886 ret
= offsetof(VexGuestMIPS32State
, guest_w13
);
890 ret
= offsetof(VexGuestMIPS32State
, guest_w14
);
894 ret
= offsetof(VexGuestMIPS32State
, guest_w15
);
898 ret
= offsetof(VexGuestMIPS32State
, guest_w16
);
902 ret
= offsetof(VexGuestMIPS32State
, guest_w17
);
906 ret
= offsetof(VexGuestMIPS32State
, guest_w18
);
910 ret
= offsetof(VexGuestMIPS32State
, guest_w19
);
914 ret
= offsetof(VexGuestMIPS32State
, guest_w20
);
918 ret
= offsetof(VexGuestMIPS32State
, guest_w21
);
922 ret
= offsetof(VexGuestMIPS32State
, guest_w22
);
926 ret
= offsetof(VexGuestMIPS32State
, guest_w23
);
930 ret
= offsetof(VexGuestMIPS32State
, guest_w24
);
934 ret
= offsetof(VexGuestMIPS32State
, guest_w25
);
938 ret
= offsetof(VexGuestMIPS32State
, guest_w26
);
942 ret
= offsetof(VexGuestMIPS32State
, guest_w27
);
946 ret
= offsetof(VexGuestMIPS32State
, guest_w28
);
950 ret
= offsetof(VexGuestMIPS32State
, guest_w29
);
954 ret
= offsetof(VexGuestMIPS32State
, guest_w30
);
958 ret
= offsetof(VexGuestMIPS32State
, guest_w31
);
971 /* Do a endian load of a 32-bit word, regardless of the endianness of the
973 static inline UInt
getUInt(const UChar
* p
)
976 #if defined (_MIPSEL)
981 #elif defined (_MIPSEB)
/* Helpers for assembling small bit-field constants, MSB-first. */
#define BITS2(_b1,_b0) \
   (((_b1) << 1) | (_b0))

#define BITS3(_b2,_b1,_b0) \
   (((_b2) << 2) | ((_b1) << 1) | (_b0))

#define BITS4(_b3,_b2,_b1,_b0) \
   (((_b3) << 3) | ((_b2) << 2) | ((_b1) << 1) | (_b0))

#define BITS5(_b4,_b3,_b2,_b1,_b0) \
   (((_b4) << 4) | BITS4((_b3),(_b2),(_b1),(_b0)))

#define BITS6(_b5,_b4,_b3,_b2,_b1,_b0) \
   ((BITS2((_b5),(_b4)) << 4) \
    | BITS4((_b3),(_b2),(_b1),(_b0)))

#define BITS8(_b7,_b6,_b5,_b4,_b3,_b2,_b1,_b0) \
   ((BITS4((_b7),(_b6),(_b5),(_b4)) << 4) \
    | BITS4((_b3),(_b2),(_b1),(_b0)))
/* Common IR-emission patterns used by the per-instruction decoders.
   NOTE(review): the mode64 selection lines (if (!mode64) / else) and the
   bodies of SXXV_PATTERN{,64} were lost in this view and are
   reconstructed — confirm against upstream. */

/* EA = rs + sext16(imm), at the guest word size, into t1. */
#define LOAD_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(rs), \
                       mkU32(extend_s_16to32(imm)))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(rs), \
                       mkU64(extend_s_16to64(imm))));

/* EA = ws + sext10(imm), at the guest word size, into t1 (MSA). */
#define LOAD_STORE_PATTERN_MSA(imm) \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(ws), \
                       mkU32(extend_s_10to32(imm)))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(ws), \
                       mkU64(extend_s_10to64(imm))));

/* EA = regRs + regRt, at the guest word size, into t1. */
#define LOADX_STORE_PATTERN \
   t1 = newTemp(mode64 ? Ity_I64 : Ity_I32); \
   if (!mode64) \
      assign(t1, binop(Iop_Add32, getIReg(regRs), getIReg(regRt))); \
   else \
      assign(t1, binop(Iop_Add64, getIReg(regRs), getIReg(regRt)));

/* Split a 64-bit EA into aligned word address (t2) and byte-in-word (t4). */
#define LWX_SWX_PATTERN64 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFFCULL))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, mkNarrowTo32( ty, binop(Iop_And64, \
                                      mkexpr(t1), mkU64(0x3))));

/* As above but for doubleword alignment. */
#define LWX_SWX_PATTERN64_1 \
   t2 = newTemp(Ity_I64); \
   assign(t2, binop(Iop_And64, mkexpr(t1), mkU64(0xFFFFFFFFFFFFFFF8ULL))); \
   t4 = newTemp(Ity_I64); \
   assign(t4, binop(Iop_And64, mkexpr(t1), mkU64(0x7)));

/* 32-bit variant: aligned word address (t2) and byte-in-word (t4). */
#define LWX_SWX_PATTERN \
   t2 = newTemp(Ity_I32); \
   assign(t2, binop(Iop_And32, mkexpr(t1), mkU32(0xFFFFFFFC))); \
   t4 = newTemp(Ity_I32); \
   assign(t4, binop(Iop_And32, mkexpr(t1), mkU32(0x00000003)))

/* rd = rt OP (rs & 31) — variable shift/rotate. */
#define SXXV_PATTERN(op) \
   putIReg(rd, binop(op, \
         getIReg(rt), \
            unop(Iop_32to8, \
               binop(Iop_And32, \
                  getIReg(rs), \
                  mkU32(0x0000001F)))))

/* 64-bit guest variant of SXXV_PATTERN, operating on the low 32 bits
   and sign-widening the result. */
#define SXXV_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
         mkNarrowTo32(ty, getIReg(rt)), \
            unop(Iop_32to8, \
               binop(Iop_And32, \
                  mkNarrowTo32(ty, getIReg(rs)), \
                  mkU32(0x0000001F)))), True))

/* rd = rt OP sa — immediate shift. */
#define SXX_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rt), mkU8(sa)));

/* rd = rs OP rt. */
#define ALU_PATTERN(op) \
   putIReg(rd, binop(op, getIReg(rs), getIReg(rt)));

/* rt = rs OP imm (32-bit immediate). */
#define ALUI_PATTERN(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU32(imm)));

/* rt = rs OP imm (64-bit immediate). */
#define ALUI_PATTERN64(op) \
   putIReg(rt, binop(op, getIReg(rs), mkU64(imm)));

/* 32-bit ALU op on a 64-bit guest: narrow, operate, sign-widen. */
#define ALU_PATTERN64(op) \
   putIReg(rd, mkWidenFrom32(ty, binop(op, \
                                       mkNarrowTo32(ty, getIReg(rs)), \
                                       mkNarrowTo32(ty, getIReg(rt))), True));

/* t3 = FP condition-code bit CC extracted from FCSR (bit 23 for cc==0,
   bit 24+cc otherwise). */
#define FP_CONDITIONAL_CODE \
   t3 = newTemp(Ity_I32); \
   assign(t3, binop(Iop_And32, \
                 IRExpr_ITE( binop(Iop_CmpEQ32, mkU32(cc), mkU32(0)), \
                             binop(Iop_Shr32, getFCSR(), mkU8(23)), \
                             binop(Iop_Shr32, getFCSR(), mkU8(24+cc))), \
                 mkU32(0x1)));

/* Raise SIGILL at the next PC.  (Name typo is historical; callers use it.) */
#define ILLEGAL_INSTRUCTON \
   putPC(mkU32(guest_PC_curr_instr + 4)); \
   dres->jk_StopHere = Ijk_SigILL; \
   dres->whatNext = Dis_StopHere;

/* Sentinel "no LL/SC reservation" address at the guest word size. */
#define LLADDR_INVALID \
   (mode64 ? mkU64(0xFFFFFFFFFFFFFFFFULL) : mkU32(0xFFFFFFFF))
1113 /*------------------------------------------------------------*/
1114 /*--- Field helpers ---*/
1115 /*------------------------------------------------------------*/
1117 static Bool
branch_or_jump(const UChar
* addr
)
1120 UInt cins
= getUInt(addr
);
1122 UInt opcode
= get_opcode(cins
);
1123 UInt rt
= get_rt(cins
);
1124 UInt function
= get_function(cins
);
1126 /* bgtz, blez, bne, beq, jal */
1127 if (opcode
== 0x07 || opcode
== 0x06 || opcode
== 0x05 || opcode
== 0x04
1128 || opcode
== 0x03 || opcode
== 0x02) {
1133 if (opcode
== 0x01 && rt
== 0x01) {
1138 if (opcode
== 0x01 && rt
== 0x11) {
1143 if (opcode
== 0x01 && rt
== 0x10) {
1148 if (opcode
== 0x01 && rt
== 0x00) {
1153 if (opcode
== 0x00 && function
== 0x09) {
1158 if (opcode
== 0x00 && function
== 0x08) {
1162 if (opcode
== 0x11) {
1164 fmt
= get_fmt(cins
);
1199 if (opcode
== 0x01 && rt
== 0x1c) {
1203 /* Cavium Specific instructions. */
1204 if (opcode
== 0x32 || opcode
== 0x3A || opcode
== 0x36 || opcode
== 0x3E) {
1205 /* BBIT0, BBIT1, BBIT032, BBIT132 */
1212 static Bool
is_Branch_or_Jump_and_Link(const UChar
* addr
)
1214 UInt cins
= getUInt(addr
);
1216 UInt opcode
= get_opcode(cins
);
1217 UInt rt
= get_rt(cins
);
1218 UInt function
= get_function(cins
);
1221 if (opcode
== 0x02) {
1225 /* bgezal or bal(r6) */
1226 if (opcode
== 0x01 && rt
== 0x11) {
1231 if (opcode
== 0x01 && rt
== 0x10) {
1236 if (opcode
== 0x00 && function
== 0x09) {
1243 static Bool
branch_or_link_likely(const UChar
* addr
)
1245 UInt cins
= getUInt(addr
);
1246 UInt opcode
= get_opcode(cins
);
1247 UInt rt
= get_rt(cins
);
1249 /* bgtzl, blezl, bnel, beql */
1250 if (opcode
== 0x17 || opcode
== 0x16 || opcode
== 0x15 || opcode
== 0x14)
1254 if (opcode
== 0x01 && rt
== 0x03)
1258 if (opcode
== 0x01 && rt
== 0x13)
1262 if (opcode
== 0x01 && rt
== 0x12)
1266 if (opcode
== 0x01 && rt
== 0x02)
1272 /*------------------------------------------------------------*/
1273 /*--- Helper bits and pieces for creating IR fragments. ---*/
1274 /*------------------------------------------------------------*/
1276 /* Generate an expression for SRC rotated right by ROT. */
1277 static IRExpr
*genROR32(IRExpr
* src
, Int rot
)
1279 vassert(rot
>= 0 && rot
< 32);
1284 return binop(Iop_Or32
, binop(Iop_Shl32
, src
, mkU8(32 - rot
)),
1285 binop(Iop_Shr32
, src
, mkU8(rot
)));
1288 static IRExpr
*genRORV32(IRExpr
* src
, IRExpr
* rs
)
1290 IRTemp t0
= newTemp(Ity_I8
);
1291 IRTemp t1
= newTemp(Ity_I8
);
1293 assign(t0
, unop(Iop_32to8
, binop(Iop_And32
, rs
, mkU32(0x0000001F))));
1294 assign(t1
, binop(Iop_Sub8
, mkU8(32), mkexpr(t0
)));
1295 return binop(Iop_Or32
, binop(Iop_Shl32
, src
, mkexpr(t1
)),
1296 binop(Iop_Shr32
, src
, mkexpr(t0
)));
1299 static void jmp_lit32 ( /*MOD*/ DisResult
* dres
, IRJumpKind kind
, Addr32 d32
)
1301 vassert(dres
->whatNext
== Dis_Continue
);
1302 vassert(dres
->len
== 0);
1303 vassert(dres
->jk_StopHere
== Ijk_INVALID
);
1304 dres
->whatNext
= Dis_StopHere
;
1305 dres
->jk_StopHere
= kind
;
1306 stmt( IRStmt_Put( OFFB_PC
, mkU32(d32
) ) );
1309 static void jmp_lit64 ( /*MOD*/ DisResult
* dres
, IRJumpKind kind
, Addr64 d64
)
1311 vassert(dres
->whatNext
== Dis_Continue
);
1312 vassert(dres
->len
== 0);
1313 vassert(dres
->jk_StopHere
== Ijk_INVALID
);
1314 dres
->whatNext
= Dis_StopHere
;
1315 dres
->jk_StopHere
= kind
;
1316 stmt(IRStmt_Put(OFFB_PC
, mkU64(d64
)));
1319 /* Get value from accumulator (helper function for MIPS32 DSP ASE instructions).
1320 This function should be called before any other operation if widening
1321 multiplications are used. */
1322 IRExpr
*getAcc(UInt acNo
)
1326 return IRExpr_Get(accumulatorGuestRegOffset(acNo
), Ity_I64
);
1329 /* Get value from DSPControl register (helper function for MIPS32 DSP ASE
1331 IRExpr
*getDSPControl(void)
1334 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_DSPControl
), Ity_I32
);
1337 /* Fetch a byte from the guest insn stream. */
1338 static UChar
getIByte(Int delta
)
1340 return guest_code
[delta
];
1343 IRExpr
*getIReg(UInt iregNo
)
1346 return mode64
? mkU64(0x0) : mkU32(0x0);
1348 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
1349 vassert(iregNo
< 32);
1350 return IRExpr_Get(integerGuestRegOffset(iregNo
), ty
);
1354 static IRExpr
*getWReg(UInt wregNo
)
1356 vassert(wregNo
<= 31);
1357 return IRExpr_Get(msaGuestRegOffset(wregNo
), Ity_V128
);
1360 static IRExpr
*getHI(void)
1363 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_HI
), Ity_I64
);
1365 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_HI
), Ity_I32
);
1368 static IRExpr
*getLO(void)
1371 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LO
), Ity_I64
);
1373 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LO
), Ity_I32
);
1376 static IRExpr
*getFCSR(void)
1379 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_FCSR
), Ity_I32
);
1381 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_FCSR
), Ity_I32
);
1384 static IRExpr
*getLLaddr(void)
1387 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LLaddr
), Ity_I64
);
1389 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LLaddr
), Ity_I32
);
1392 static IRExpr
*getLLdata(void)
1395 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_LLdata
), Ity_I64
);
1397 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_LLdata
), Ity_I32
);
1400 static IRExpr
*getMSACSR(void)
1403 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_MSACSR
), Ity_I32
);
1405 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_MSACSR
), Ity_I32
);
1408 /* Get byte from register reg, byte pos from 0 to 3 (or 7 for MIPS64) . */
1409 static IRExpr
*getByteFromReg(UInt reg
, UInt byte_pos
)
1411 UInt pos
= byte_pos
* 8;
1414 return unop(Iop_64to8
, binop(Iop_And64
,
1415 binop(Iop_Shr64
, getIReg(reg
), mkU8(pos
)),
1418 return unop(Iop_32to8
, binop(Iop_And32
,
1419 binop(Iop_Shr32
, getIReg(reg
), mkU8(pos
)),
1423 static void putFCSR(IRExpr
* e
)
1426 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_FCSR
), e
));
1428 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_FCSR
), e
));
1431 static void putLLaddr(IRExpr
* e
)
1434 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LLaddr
), e
));
1436 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LLaddr
), e
));
1439 static void putLLdata(IRExpr
* e
)
1442 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LLdata
), e
));
1444 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LLdata
), e
));
1447 static void putMSACSR(IRExpr
* e
)
1450 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_MSACSR
), e
));
1452 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_MSACSR
), e
));
1455 /* fs - fpu source register number.
1456 inst - fpu instruction that needs to be executed.
1457 sz32 - size of source register.
1458 opN - number of operads:
1459 1 - unary operation.
1460 2 - binary operation. */
1461 static void calculateFCSR(UInt fs
, UInt ft
, UInt inst
, Bool sz32
, UInt opN
)
1464 IRTemp fcsr
= newTemp(Ity_I32
);
1466 /* IRExpr_GSPTR() => Need to pass pointer to guest state to helper. */
1468 d
= unsafeIRDirty_1_N(fcsr
, 0,
1469 "mips_dirtyhelper_calculate_FCSR_fp64",
1470 &mips_dirtyhelper_calculate_FCSR_fp64
,
1471 mkIRExprVec_4(IRExpr_GSPTR(),
1476 d
= unsafeIRDirty_1_N(fcsr
, 0,
1477 "mips_dirtyhelper_calculate_FCSR_fp32",
1478 &mips_dirtyhelper_calculate_FCSR_fp32
,
1479 mkIRExprVec_4(IRExpr_GSPTR(),
1484 if (opN
== 1) { /* Unary operation. */
1485 /* Declare we're reading guest state. */
1486 if (sz32
|| fp_mode64
)
1491 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1493 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1496 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_FCSR
);
1498 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_FCSR
);
1500 d
->fxState
[0].size
= sizeof(UInt
);
1501 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1502 d
->fxState
[1].offset
= floatGuestRegOffset(fs
);
1503 d
->fxState
[1].size
= sizeof(ULong
);
1505 if (!(sz32
|| fp_mode64
)) {
1506 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1507 d
->fxState
[2].offset
= floatGuestRegOffset(fs
+ 1);
1508 d
->fxState
[2].size
= sizeof(ULong
);
1510 } else if (opN
== 2) { /* Binary operation. */
1511 /* Declare we're reading guest state. */
1512 if (sz32
|| fp_mode64
)
1517 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1519 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1522 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_FCSR
);
1524 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_FCSR
);
1526 d
->fxState
[0].size
= sizeof(UInt
);
1527 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1528 d
->fxState
[1].offset
= floatGuestRegOffset(fs
);
1529 d
->fxState
[1].size
= sizeof(ULong
);
1530 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1531 d
->fxState
[2].offset
= floatGuestRegOffset(ft
);
1532 d
->fxState
[2].size
= sizeof(ULong
);
1534 if (!(sz32
|| fp_mode64
)) {
1535 d
->fxState
[3].fx
= Ifx_Read
; /* read */
1536 d
->fxState
[3].offset
= floatGuestRegOffset(fs
+ 1);
1537 d
->fxState
[3].size
= sizeof(ULong
);
1538 d
->fxState
[4].fx
= Ifx_Read
; /* read */
1539 d
->fxState
[4].offset
= floatGuestRegOffset(ft
+ 1);
1540 d
->fxState
[4].size
= sizeof(ULong
);
1544 stmt(IRStmt_Dirty(d
));
1546 putFCSR(mkexpr(fcsr
));
1549 /* ws, wt - source MSA register numbers.
1550 inst - MSA fp instruction that needs to be executed.
1551 opN - number of operads:
1552 1 - unary operation.
1553 2 - binary operation. */
1554 static void calculateMSACSR(UInt ws
, UInt wt
, UInt inst
, UInt opN
)
1557 IRTemp msacsr
= newTemp(Ity_I32
);
1558 /* IRExpr_BBPTR() => Need to pass pointer to guest state to helper. */
1559 d
= unsafeIRDirty_1_N(msacsr
, 0,
1560 "mips_dirtyhelper_calculate_MSACSR",
1561 &mips_dirtyhelper_calculate_MSACSR
,
1562 mkIRExprVec_4(IRExpr_GSPTR(),
1567 if (opN
== 1) { /* Unary operation. */
1568 /* Declare we're reading guest state. */
1570 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1571 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1574 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_MSACSR
);
1576 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_MSACSR
);
1578 d
->fxState
[0].size
= sizeof(UInt
);
1579 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1580 d
->fxState
[1].offset
= msaGuestRegOffset(ws
);
1581 d
->fxState
[1].size
= sizeof(ULong
);
1582 } else if (opN
== 2) { /* Binary operation. */
1583 /* Declare we're reading guest state. */
1585 vex_bzero(&d
->fxState
, sizeof(d
->fxState
));
1586 d
->fxState
[0].fx
= Ifx_Read
; /* read */
1589 d
->fxState
[0].offset
= offsetof(VexGuestMIPS64State
, guest_MSACSR
);
1591 d
->fxState
[0].offset
= offsetof(VexGuestMIPS32State
, guest_MSACSR
);
1593 d
->fxState
[0].size
= sizeof(UInt
);
1594 d
->fxState
[1].fx
= Ifx_Read
; /* read */
1595 d
->fxState
[1].offset
= msaGuestRegOffset(ws
);
1596 d
->fxState
[1].size
= sizeof(ULong
);
1597 d
->fxState
[2].fx
= Ifx_Read
; /* read */
1598 d
->fxState
[2].offset
= msaGuestRegOffset(wt
);
1599 d
->fxState
[2].size
= sizeof(ULong
);
1602 stmt(IRStmt_Dirty(d
));
1603 putMSACSR(mkexpr(msacsr
));
1606 static IRExpr
*getULR(void)
1609 return IRExpr_Get(offsetof(VexGuestMIPS64State
, guest_ULR
), Ity_I64
);
1611 return IRExpr_Get(offsetof(VexGuestMIPS32State
, guest_ULR
), Ity_I32
);
1614 void putIReg(UInt archreg
, IRExpr
* e
)
1616 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
1617 vassert(archreg
< 32);
1618 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1621 stmt(IRStmt_Put(integerGuestRegOffset(archreg
), e
));
1624 static void putWReg(UInt wregNo
, IRExpr
* e
)
1626 vassert(wregNo
<= 31);
1627 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == Ity_V128
);
1628 stmt(IRStmt_Put(msaGuestRegOffset(wregNo
), e
));
1629 stmt(IRStmt_Put(floatGuestRegOffset(wregNo
),
1630 unop(Iop_ReinterpI64asF64
, unop(Iop_V128to64
, e
))));
1633 IRExpr
*mkNarrowTo32(IRType ty
, IRExpr
* src
)
1635 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1636 return ty
== Ity_I64
? unop(Iop_64to32
, src
) : src
;
1639 void putLO(IRExpr
* e
)
1642 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_LO
), e
));
1644 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_LO
), e
));
1645 /* Add value to lower 32 bits of ac0 to maintain compatibility between
1646 regular MIPS32 instruction set and MIPS DSP ASE. Keep higher 32bits
1648 IRTemp t_lo
= newTemp(Ity_I32
);
1649 IRTemp t_hi
= newTemp(Ity_I32
);
1651 assign(t_hi
, unop(Iop_64HIto32
, getAcc(0)));
1652 stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
1653 binop(Iop_32HLto64
, mkexpr(t_hi
), mkexpr(t_lo
))));
1657 void putHI(IRExpr
* e
)
1660 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_HI
), e
));
1662 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_HI
), e
));
1663 /* Add value to higher 32 bits of ac0 to maintain compatibility between
1664 regular MIPS32 instruction set and MIPS DSP ASE. Keep lower 32bits
1666 IRTemp t_lo
= newTemp(Ity_I32
);
1667 IRTemp t_hi
= newTemp(Ity_I32
);
1669 assign(t_lo
, unop(Iop_64to32
, getAcc(0)));
1670 stmt(IRStmt_Put(accumulatorGuestRegOffset(0),
1671 binop(Iop_32HLto64
, mkexpr(t_hi
), mkexpr(t_lo
))));
1675 static IRExpr
*mkNarrowTo8 ( IRType ty
, IRExpr
* src
)
1677 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1678 return ty
== Ity_I64
? unop(Iop_64to8
, src
) : unop(Iop_32to8
, src
);
1681 static IRExpr
*mkNarrowTo16 ( IRType ty
, IRExpr
* src
)
1683 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1684 return ty
== Ity_I64
? unop(Iop_64to16
, src
) : unop(Iop_32to16
, src
);
1687 static void putPC(IRExpr
* e
)
1689 stmt(IRStmt_Put(OFFB_PC
, e
));
1692 static IRExpr
*mkWidenFrom32(IRType ty
, IRExpr
* src
, Bool sined
)
1694 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
1699 return (sined
) ? unop(Iop_32Sto64
, src
) : unop(Iop_32Uto64
, src
);
1702 /* Narrow 8/16/32 bit int expr to 8/16/32. Clearly only some
1703 of these combinations make sense. */
1704 static IRExpr
*narrowTo(IRType dst_ty
, IRExpr
* e
)
1706 IRType src_ty
= typeOfIRExpr(irsb
->tyenv
, e
);
1708 if (src_ty
== dst_ty
)
1711 if (src_ty
== Ity_I32
&& dst_ty
== Ity_I16
)
1712 return unop(Iop_32to16
, e
);
1714 if (src_ty
== Ity_I32
&& dst_ty
== Ity_I8
)
1715 return unop(Iop_32to8
, e
);
1717 if (src_ty
== Ity_I64
&& dst_ty
== Ity_I8
) {
1719 return unop(Iop_64to8
, e
);
1722 if (src_ty
== Ity_I64
&& dst_ty
== Ity_I16
) {
1724 return unop(Iop_64to16
, e
);
1727 vpanic("narrowTo(mips)");
1731 static IRExpr
*getLoFromF64(IRType ty
, IRExpr
* src
)
1733 vassert(ty
== Ity_F32
|| ty
== Ity_F64
);
1735 if (ty
== Ity_F64
) {
1737 t0
= newTemp(Ity_I64
);
1738 t1
= newTemp(Ity_I32
);
1739 assign(t0
, unop(Iop_ReinterpF64asI64
, src
));
1740 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
1741 return unop(Iop_ReinterpI32asF32
, mkexpr(t1
));
1746 static inline IRExpr
*getHiFromF64(IRExpr
* src
)
1748 vassert(typeOfIRExpr(irsb
->tyenv
, src
) == Ity_F64
);
1749 return unop(Iop_ReinterpI32asF32
, unop(Iop_64HIto32
,
1750 unop(Iop_ReinterpF64asI64
, src
)));
1753 static IRExpr
*mkWidenFromF32(IRType ty
, IRExpr
* src
)
1755 vassert(ty
== Ity_F32
|| ty
== Ity_F64
);
1757 if (ty
== Ity_F64
) {
1758 IRTemp t0
= newTemp(Ity_I32
);
1759 IRTemp t1
= newTemp(Ity_I64
);
1760 assign(t0
, unop(Iop_ReinterpF32asI32
, src
));
1761 assign(t1
, binop(Iop_32HLto64
, mkU32(0x0), mkexpr(t0
)));
1762 return unop(Iop_ReinterpI64asF64
, mkexpr(t1
));
1767 /* Convenience function to move to next instruction on condition. */
1768 static void mips_next_insn_if(IRExpr
*condition
)
1770 vassert(typeOfIRExpr(irsb
->tyenv
, condition
) == Ity_I1
);
1772 stmt(IRStmt_Exit(condition
, Ijk_Boring
,
1773 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
1774 IRConst_U32(guest_PC_curr_instr
+ 4),
1778 static IRExpr
*dis_branch_likely(IRExpr
* guard
, UInt imm
)
1780 ULong branch_offset
;
1783 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1784 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1785 is added to the address of the instruction following
1786 the branch (not the branch itself), in the branch delay slot, to form
1787 a PC-relative effective target address. */
1789 branch_offset
= extend_s_18to64(imm
<< 2);
1791 branch_offset
= extend_s_18to32(imm
<< 2);
1793 t0
= newTemp(Ity_I1
);
1797 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
1798 IRConst_U64(guest_PC_curr_instr
+ 8), OFFB_PC
));
1800 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
1801 IRConst_U32(guest_PC_curr_instr
+ 8), OFFB_PC
));
1803 irsb
->jumpkind
= Ijk_Boring
;
1806 return mkU64(guest_PC_curr_instr
+ 4 + branch_offset
);
1808 return mkU32(guest_PC_curr_instr
+ 4 + branch_offset
);
1811 static void dis_branch(Bool link
, IRExpr
* guard
, UInt imm
, IRStmt
** set
)
1813 ULong branch_offset
;
1816 if (link
) { /* LR (GPR31) = addr of the 2nd instr after branch instr */
1818 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
1820 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
1823 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1824 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1825 is added to the address of the instruction following
1826 the branch (not the branch itself), in the branch delay slot, to form
1827 a PC-relative effective target address. */
1830 branch_offset
= extend_s_18to64(imm
<< 2);
1832 branch_offset
= extend_s_18to32(imm
<< 2);
1834 t0
= newTemp(Ity_I1
);
1838 *set
= IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1839 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
1842 *set
= IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1843 IRConst_U32(guest_PC_curr_instr
+ 4 +
1844 (UInt
) branch_offset
), OFFB_PC
);
1847 static void dis_branch_compact(Bool link
, IRExpr
* guard
, UInt imm
,
1850 ULong branch_offset
;
1853 if (link
) { /* LR (GPR31) = addr of the instr after branch instr */
1855 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
1857 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
1859 dres
->jk_StopHere
= Ijk_Call
;
1861 dres
->jk_StopHere
= Ijk_Boring
;
1864 dres
->whatNext
= Dis_StopHere
;
1866 /* PC = PC + (SignExtend(signed_immed_24) << 2)
1867 An 18-bit signed offset (the 16-bit offset field shifted left 2 bits)
1868 is added to the address of the instruction following
1869 the branch (not the branch itself), in the branch delay slot, to form
1870 a PC-relative effective target address. */
1873 branch_offset
= extend_s_18to64(imm
<< 2);
1875 branch_offset
= extend_s_18to32(imm
<< 2);
1877 t0
= newTemp(Ity_I1
);
1881 stmt(IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1882 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
1884 putPC(mkU64(guest_PC_curr_instr
+ 4));
1886 stmt(IRStmt_Exit(mkexpr(t0
), link
? Ijk_Call
: Ijk_Boring
,
1887 IRConst_U32(guest_PC_curr_instr
+ 4 +
1888 (UInt
) branch_offset
), OFFB_PC
));
1889 putPC(mkU32(guest_PC_curr_instr
+ 4));
1893 static IRExpr
*getFReg(UInt fregNo
)
1895 vassert(fregNo
< 32);
1896 IRType ty
= fp_mode64
? Ity_F64
: Ity_F32
;
1897 return IRExpr_Get(floatGuestRegOffset(fregNo
), ty
);
1900 static IRExpr
*getDReg(UInt dregNo
)
1902 vassert(dregNo
< 32);
1905 return IRExpr_Get(floatGuestRegOffset(dregNo
), Ity_F64
);
1907 /* Read a floating point register pair and combine their contents into a
1909 IRTemp t0
= newTemp(Ity_F32
);
1910 IRTemp t1
= newTemp(Ity_F32
);
1911 IRTemp t2
= newTemp(Ity_F64
);
1912 IRTemp t3
= newTemp(Ity_I32
);
1913 IRTemp t4
= newTemp(Ity_I32
);
1914 IRTemp t5
= newTemp(Ity_I64
);
1916 assign(t0
, getFReg(dregNo
& (~1)));
1917 assign(t1
, getFReg(dregNo
| 1));
1919 assign(t3
, unop(Iop_ReinterpF32asI32
, mkexpr(t0
)));
1920 assign(t4
, unop(Iop_ReinterpF32asI32
, mkexpr(t1
)));
1921 assign(t5
, binop(Iop_32HLto64
, mkexpr(t4
), mkexpr(t3
)));
1922 assign(t2
, unop(Iop_ReinterpI64asF64
, mkexpr(t5
)));
1928 static void putFReg(UInt dregNo
, IRExpr
* e
)
1930 vassert(dregNo
< 32);
1931 IRType ty
= fp_mode64
? Ity_F64
: Ity_F32
;
1932 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1934 if (fp_mode64_fre
) {
1935 IRTemp t0
= newTemp(Ity_F32
);
1936 assign(t0
, getLoFromF64(ty
, e
));
1937 #if defined (_MIPSEL)
1938 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), mkexpr(t0
)));
1941 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) - 4, mkexpr(t0
)));
1944 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) + 4, mkexpr(t0
)));
1947 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
& (~1)), mkexpr(t0
)));
1951 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), e
));
1954 if (has_msa
&& fp_mode64
) {
1955 stmt(IRStmt_Put(msaGuestRegOffset(dregNo
),
1956 binop(Iop_64HLtoV128
,
1957 unop(Iop_ReinterpF64asI64
, e
),
1958 unop(Iop_ReinterpF64asI64
, e
))));
1962 static void putDReg(UInt dregNo
, IRExpr
* e
)
1965 vassert(dregNo
< 32);
1966 IRType ty
= Ity_F64
;
1967 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == ty
);
1968 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
), e
));
1970 if (fp_mode64_fre
) {
1971 IRTemp t0
= newTemp(Ity_F32
);
1974 assign(t0
, getLoFromF64(ty
, e
));
1975 #if defined (_MIPSEL)
1976 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
) - 4, mkexpr(t0
)));
1978 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
& (~1)), mkexpr(t0
)));
1981 assign(t0
, getHiFromF64(e
));
1982 #if defined (_MIPSEL)
1983 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
| 1), mkexpr(t0
)));
1985 stmt(IRStmt_Put(floatGuestRegOffset(dregNo
| 1) + 4, mkexpr(t0
)));
1991 stmt(IRStmt_Put(msaGuestRegOffset(dregNo
),
1992 binop(Iop_64HLtoV128
,
1993 unop(Iop_ReinterpF64asI64
, e
),
1994 unop(Iop_ReinterpF64asI64
, e
))));
1996 vassert(dregNo
< 32);
1997 vassert(typeOfIRExpr(irsb
->tyenv
, e
) == Ity_F64
);
1998 IRTemp t1
= newTemp(Ity_F64
);
1999 IRTemp t4
= newTemp(Ity_I32
);
2000 IRTemp t5
= newTemp(Ity_I32
);
2001 IRTemp t6
= newTemp(Ity_I64
);
2003 assign(t6
, unop(Iop_ReinterpF64asI64
, mkexpr(t1
)));
2004 assign(t4
, unop(Iop_64HIto32
, mkexpr(t6
))); /* hi */
2005 assign(t5
, unop(Iop_64to32
, mkexpr(t6
))); /* lo */
2006 putFReg(dregNo
& (~1), unop(Iop_ReinterpI32asF32
, mkexpr(t5
)));
2007 putFReg(dregNo
| 1, unop(Iop_ReinterpI32asF32
, mkexpr(t4
)));
2011 static void setFPUCondCode(IRExpr
* e
, UInt cc
)
2014 putFCSR(binop(Iop_And32
, getFCSR(), mkU32(0xFF7FFFFF)));
2015 putFCSR(binop(Iop_Or32
, getFCSR(), binop(Iop_Shl32
, e
, mkU8(23))));
2017 putFCSR(binop(Iop_And32
, getFCSR(), unop(Iop_Not32
,
2018 binop(Iop_Shl32
, mkU32(0x01000000), mkU8(cc
)))));
2019 putFCSR(binop(Iop_Or32
, getFCSR(), binop(Iop_Shl32
, e
, mkU8(24 + cc
))));
2023 static IRExpr
* get_IR_roundingmode ( void )
2026 rounding mode | MIPS | IR
2027 ------------------------
2028 to nearest | 00 | 00
2030 to +infinity | 10 | 10
2031 to -infinity | 11 | 01
2033 IRTemp rm_MIPS
= newTemp(Ity_I32
);
2034 /* Last two bits in FCSR are rounding mode. */
2037 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS64State
,
2038 guest_FCSR
), Ity_I32
), mkU32(3)));
2040 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS32State
,
2041 guest_FCSR
), Ity_I32
), mkU32(3)));
2043 /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
2045 return binop(Iop_Xor32
, mkexpr(rm_MIPS
), binop(Iop_And32
,
2046 binop(Iop_Shl32
, mkexpr(rm_MIPS
), mkU8(1)), mkU32(2)));
2049 static IRExpr
* get_IR_roundingmode_MSA ( void )
2052 rounding mode | MIPS | IR
2053 ------------------------
2054 to nearest | 00 | 00
2056 to +infinity | 10 | 10
2057 to -infinity | 11 | 01
2059 IRTemp rm_MIPS
= newTemp(Ity_I32
);
2060 /* Last two bits in MSACSR are rounding mode. */
2063 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS64State
,
2064 guest_MSACSR
), Ity_I32
), mkU32(3)));
2066 assign(rm_MIPS
, binop(Iop_And32
, IRExpr_Get(offsetof(VexGuestMIPS32State
,
2067 guest_MSACSR
), Ity_I32
), mkU32(3)));
2069 /* rm_IR = XOR( rm_MIPS32, (rm_MIPS32 << 1) & 2) */
2070 return binop(Iop_Xor32
, mkexpr(rm_MIPS
), binop(Iop_And32
,
2071 binop(Iop_Shl32
, mkexpr(rm_MIPS
), mkU8(1)), mkU32(2)));
2074 /* sz, ULong -> IRExpr */
2075 static IRExpr
*mkSzImm ( IRType ty
, ULong imm64
)
2077 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2078 return ty
== Ity_I64
? mkU64(imm64
) : mkU32((UInt
) imm64
);
2081 static IRConst
*mkSzConst ( IRType ty
, ULong imm64
)
2083 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2084 return (ty
== Ity_I64
? IRConst_U64(imm64
) : IRConst_U32((UInt
) imm64
));
2087 /* Make sure we get valid 32 and 64bit addresses */
2088 static Addr64
mkSzAddr ( IRType ty
, Addr64 addr
)
2090 vassert(ty
== Ity_I32
|| ty
== Ity_I64
);
2091 return (ty
== Ity_I64
? (Addr64
) addr
:
2092 (Addr64
) extend_s_32to64(toUInt(addr
)));
2095 /* Shift and Rotate instructions for MIPS64 */
2096 static Bool
dis_instr_shrt ( UInt theInstr
)
2098 UInt opc2
= get_function(theInstr
);
2099 UChar regRs
= get_rs(theInstr
);
2100 UChar regRt
= get_rt(theInstr
);
2101 UChar regRd
= get_rd(theInstr
);
2102 UChar uImmsa
= get_sa(theInstr
);
2103 Long sImmsa
= extend_s_16to64(uImmsa
);
2104 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2105 IRTemp tmp
= newTemp(ty
);
2106 IRTemp tmpOr
= newTemp(ty
);
2107 IRTemp tmpRt
= newTemp(ty
);
2108 IRTemp tmpRs
= newTemp(ty
);
2109 IRTemp tmpRd
= newTemp(ty
);
2111 assign(tmpRs
, getIReg(regRs
));
2112 assign(tmpRt
, getIReg(regRt
));
2116 if ((regRs
& 0x01) == 0) {
2117 /* Doubleword Shift Right Logical - DSRL; MIPS64 */
2118 DIP("dsrl r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2119 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2120 putIReg(regRd
, mkexpr(tmpRd
));
2121 } else if ((regRs
& 0x01) == 1) {
2122 /* Doubleword Rotate Right - DROTR; MIPS64r2 */
2124 DIP("drotr r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2125 IRTemp tmpL
= newTemp(ty
);
2126 IRTemp tmpR
= newTemp(ty
);
2127 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2128 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(63 - uImmsa
)));
2129 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
2130 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpL
), mkexpr(tmpR
)));
2131 putIReg(regRd
, mkexpr(tmpRd
));
2138 if ((regRs
& 0x01) == 0) {
2139 /* Doubleword Shift Right Logical Plus 32 - DSRL32; MIPS64 */
2140 DIP("dsrl32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
+ 32);
2141 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2142 putIReg(regRd
, mkexpr(tmpRd
));
2143 } else if ((regRs
& 0x01) == 1) {
2144 /* Doubleword Rotate Right Plus 32 - DROTR32; MIPS64r2 */
2145 DIP("drotr32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2147 IRTemp tmpL
= newTemp(ty
);
2148 IRTemp tmpR
= newTemp(ty
);
2149 /* (tmpRt >> sa) | (tmpRt << (64 - sa)) */
2150 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2151 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
),
2152 mkU8(63 - (uImmsa
+ 32))));
2153 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
2154 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpL
), mkexpr(tmpR
)));
2155 putIReg(regRd
, mkexpr(tmpRd
));
2162 if ((uImmsa
& 0x01) == 0) {
2163 /* Doubleword Shift Right Logical Variable - DSRLV; MIPS64 */
2164 DIP("dsrlv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2165 IRTemp tmpRs8
= newTemp(Ity_I8
);
2166 /* s = tmpRs[5..0] */
2167 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkU64(63)));
2168 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2169 assign(tmpRd
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2170 putIReg(regRd
, mkexpr(tmpRd
));
2171 } else if ((uImmsa
& 0x01) == 1) {
2172 /* Doubleword Rotate Right Variable - DROTRV; MIPS64r2 */
2173 DIP("drotrv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2174 IRTemp tmpL
= newTemp(ty
);
2175 IRTemp tmpR
= newTemp(ty
);
2176 IRTemp tmpRs8
= newTemp(Ity_I8
);
2177 IRTemp tmpLs8
= newTemp(Ity_I8
);
2178 IRTemp tmp64
= newTemp(ty
);
2181 (tmpRt << s) | (tmpRt >> m) */
2183 assign(tmp64
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2184 assign(tmp
, binop(Iop_Sub64
, mkU64(63), mkexpr(tmp64
)));
2186 assign(tmpLs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2187 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp64
)));
2189 assign(tmpR
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2190 assign(tmpL
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkexpr(tmpLs8
)));
2191 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpL
), mkU8(1)));
2192 assign(tmpOr
, binop(Iop_Or64
, mkexpr(tmpRd
), mkexpr(tmpR
)));
2194 putIReg(regRd
, mkexpr(tmpOr
));
2200 case 0x38: /* Doubleword Shift Left Logical - DSLL; MIPS64 */
2201 DIP("dsll r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2203 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2204 putIReg(regRd
, mkexpr(tmpRd
));
2207 case 0x3C: /* Doubleword Shift Left Logical Plus 32 - DSLL32; MIPS64 */
2208 DIP("dsll32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2209 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2210 putIReg(regRd
, mkexpr(tmpRd
));
2213 case 0x14: { /* Doubleword Shift Left Logical Variable - DSLLV; MIPS64 */
2214 DIP("dsllv r%u, r%u, r%u", regRd
, regRt
, regRs
);
2215 IRTemp tmpRs8
= newTemp(Ity_I8
);
2217 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2218 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2219 assign(tmpRd
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2220 putIReg(regRd
, mkexpr(tmpRd
));
2224 case 0x3B: /* Doubleword Shift Right Arithmetic - DSRA; MIPS64 */
2225 DIP("dsra r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2226 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkU8(uImmsa
)));
2227 putIReg(regRd
, mkexpr(tmpRd
));
2230 case 0x3F: /* Doubleword Shift Right Arithmetic Plus 32 - DSRA32;
2232 DIP("dsra32 r%u, r%u, %lld", regRd
, regRt
, sImmsa
);
2233 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkU8(uImmsa
+ 32)));
2234 putIReg(regRd
, mkexpr(tmpRd
));
2238 /* Doubleword Shift Right Arithmetic Variable - DSRAV;
2240 DIP("dsrav r%u, r%u, r%u", regRd
, regRt
, regRs
);
2241 IRTemp tmpRs8
= newTemp(Ity_I8
);
2242 assign(tmp
, binop(Iop_And64
, mkexpr(tmpRs
), mkSzImm(ty
, 63)));
2243 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
2244 assign(tmpRd
, binop(Iop_Sar64
, mkexpr(tmpRt
), mkexpr(tmpRs8
)));
2245 putIReg(regRd
, mkexpr(tmpRd
));
2258 static IROp
mkSzOp ( IRType ty
, IROp op8
)
2261 vassert(ty
== Ity_I8
|| ty
== Ity_I16
|| ty
== Ity_I32
|| ty
== Ity_I64
);
2262 vassert(op8
== Iop_Add8
|| op8
== Iop_Sub8
|| op8
== Iop_Mul8
2263 || op8
== Iop_Or8
|| op8
== Iop_And8
|| op8
== Iop_Xor8
2264 || op8
== Iop_Shl8
|| op8
== Iop_Shr8
|| op8
== Iop_Sar8
2265 || op8
== Iop_CmpEQ8
|| op8
== Iop_CmpNE8
|| op8
== Iop_Not8
);
2266 adj
= ty
== Ity_I8
? 0 : (ty
== Ity_I16
? 1 : (ty
== Ity_I32
? 2 : 3));
2270 /*********************************************************/
2271 /*--- Floating Point Compare ---*/
2272 /*********************************************************/
2273 /* Function that returns a string that represent mips cond
2274 mnemonic for the input code. */
2275 static const HChar
* showCondCode(UInt code
)
2345 vpanic("showCondCode");
2352 static Bool
dis_instr_CCondFmt ( UInt cins
)
2354 IRTemp t0
, t1
, t2
, t3
, tmp5
, tmp6
;
2355 IRTemp ccIR
= newTemp(Ity_I32
);
2356 IRTemp ccMIPS
= newTemp(Ity_I32
);
2357 UInt FC
= get_FC(cins
);
2358 UInt fmt
= get_fmt(cins
);
2359 UInt fs
= get_fs(cins
);
2360 UInt ft
= get_ft(cins
);
2361 UInt cond
= get_cond(cins
);
2363 if (FC
== 0x3) { /* C.cond.fmt */
2364 UInt fpc_cc
= get_fpc_cc(cins
);
2367 case 0x10: { /* C.cond.S */
2368 DIP("c.%s.s %u, f%u, f%u", showCondCode(cond
), fpc_cc
, fs
, ft
);
2371 t0
= newTemp(Ity_I32
);
2372 t1
= newTemp(Ity_I32
);
2373 t2
= newTemp(Ity_I32
);
2374 t3
= newTemp(Ity_I32
);
2376 tmp5
= newTemp(Ity_F64
);
2377 tmp6
= newTemp(Ity_F64
);
2379 assign(tmp5
, unop(Iop_F32toF64
, getLoFromF64(Ity_F64
,
2381 assign(tmp6
, unop(Iop_F32toF64
, getLoFromF64(Ity_F64
,
2384 assign(ccIR
, binop(Iop_CmpF64
, mkexpr(tmp5
), mkexpr(tmp6
)));
2385 putHI(mkWidenFrom32(mode64
? Ity_I64
: Ity_I32
,
2386 mkexpr(ccIR
), True
));
2387 /* Map compare result from IR to MIPS
2388 FP cmp result | MIPS | IR
2389 --------------------------
2396 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2397 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2398 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2399 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))), mkU32(2)),
2400 binop(Iop_And32
, binop(Iop_Xor32
, mkexpr(ccIR
),
2401 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2403 putLO(mkWidenFrom32(mode64
? Ity_I64
: Ity_I32
,
2404 mkexpr(ccMIPS
), True
));
2407 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2409 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2410 mkU8(0x1)), mkU32(0x1)));
2412 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2413 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2415 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2416 mkU8(0x3)), mkU32(0x1)));
2420 setFPUCondCode(mkU32(0), fpc_cc
);
2424 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2428 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2432 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2437 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2441 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2446 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2451 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2455 setFPUCondCode(mkU32(0), fpc_cc
);
2459 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2463 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2467 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2472 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2476 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2481 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2486 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2494 t0
= newTemp(Ity_I32
);
2495 t1
= newTemp(Ity_I32
);
2496 t2
= newTemp(Ity_I32
);
2497 t3
= newTemp(Ity_I32
);
2499 assign(ccIR
, binop(Iop_CmpF64
, unop(Iop_F32toF64
, getFReg(fs
)),
2500 unop(Iop_F32toF64
, getFReg(ft
))));
2501 /* Map compare result from IR to MIPS
2502 FP cmp result | MIPS | IR
2503 --------------------------
2510 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2511 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2512 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2513 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))),
2514 mkU32(2)), binop(Iop_And32
,
2515 binop(Iop_Xor32
, mkexpr(ccIR
),
2516 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2519 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2521 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2522 mkU8(0x1)), mkU32(0x1)));
2524 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2525 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2527 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2528 mkU8(0x3)), mkU32(0x1)));
2532 setFPUCondCode(mkU32(0), fpc_cc
);
2536 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2540 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2544 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2549 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2553 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2558 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2563 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2567 setFPUCondCode(mkU32(0), fpc_cc
);
2571 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2575 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2579 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2584 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2588 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2593 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2598 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2608 case 0x11: { /* C.cond.D */
2609 DIP("c.%s.d %u, f%u, f%u", showCondCode(cond
), fpc_cc
, fs
, ft
);
2610 t0
= newTemp(Ity_I32
);
2611 t1
= newTemp(Ity_I32
);
2612 t2
= newTemp(Ity_I32
);
2613 t3
= newTemp(Ity_I32
);
2614 assign(ccIR
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
2615 /* Map compare result from IR to MIPS
2616 FP cmp result | MIPS | IR
2617 --------------------------
2624 /* ccMIPS = Shl(1, (~(ccIR>>5) & 2) | ((ccIR ^ (ccIR>>6)) & 1) */
2625 assign(ccMIPS
, binop(Iop_Shl32
, mkU32(1), unop(Iop_32to8
,
2626 binop(Iop_Or32
, binop(Iop_And32
, unop(Iop_Not32
,
2627 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(5))), mkU32(2)),
2628 binop(Iop_And32
, binop(Iop_Xor32
, mkexpr(ccIR
),
2629 binop(Iop_Shr32
, mkexpr(ccIR
), mkU8(6))),
2633 assign(t0
, binop(Iop_And32
, mkexpr(ccMIPS
), mkU32(0x1)));
2635 assign(t1
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2636 mkU8(0x1)), mkU32(0x1)));
2638 assign(t2
, binop(Iop_And32
, unop(Iop_Not32
, binop(Iop_Shr32
,
2639 mkexpr(ccMIPS
), mkU8(0x2))), mkU32(0x1)));
2641 assign(t3
, binop(Iop_And32
, binop(Iop_Shr32
, mkexpr(ccMIPS
),
2642 mkU8(0x3)), mkU32(0x1)));
2646 setFPUCondCode(mkU32(0), fpc_cc
);
2650 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2654 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2658 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2663 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2667 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2672 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2677 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2681 setFPUCondCode(mkU32(0), fpc_cc
);
2685 setFPUCondCode(mkexpr(t0
), fpc_cc
);
2689 setFPUCondCode(mkexpr(t1
), fpc_cc
);
2693 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t1
)),
2698 setFPUCondCode(mkexpr(t3
), fpc_cc
);
2702 setFPUCondCode(binop(Iop_Or32
, mkexpr(t0
), mkexpr(t3
)),
2707 setFPUCondCode(binop(Iop_Or32
, mkexpr(t3
), mkexpr(t1
)),
2712 setFPUCondCode(mkexpr(t2
), fpc_cc
);
2731 /*********************************************************/
2732 /*--- Branch Instructions for mips64 ---*/
2733 /*********************************************************/
2734 static Bool
dis_instr_branch ( UInt theInstr
, DisResult
* dres
, IRStmt
** set
)
2737 UChar opc1
= get_opcode(theInstr
);
2738 UChar regRs
= get_rs(theInstr
);
2739 UChar regRt
= get_rt(theInstr
);
2740 UInt offset
= get_imm(theInstr
);
2741 Long sOffset
= extend_s_16to64(offset
);
2742 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2743 IROp opSlt
= mode64
? Iop_CmpLT64S
: Iop_CmpLT32S
;
2745 IRTemp tmp
= newTemp(ty
);
2746 IRTemp tmpRs
= newTemp(ty
);
2747 IRTemp tmpRt
= newTemp(ty
);
2748 IRTemp tmpLt
= newTemp(ty
);
2749 IRTemp tmpReg0
= newTemp(ty
);
2751 UChar regLnk
= 31; /* reg 31 is link reg in MIPS */
2753 Addr64 cia
= guest_PC_curr_instr
;
2755 IRExpr
*eConst0
= mkSzImm(ty
, (UInt
) 0);
2756 IRExpr
*eNia
= mkSzImm(ty
, cia
+ 8);
2757 IRExpr
*eCond
= NULL
;
2759 assign(tmpRs
, getIReg(regRs
));
2760 assign(tmpRt
, getIReg(regRt
));
2761 assign(tmpReg0
, getIReg(0));
2763 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpReg0
), mkexpr(tmpReg0
));
2768 case 0x00: { /* BLTZ rs, offset */
2769 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2770 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2772 assign(tmp
, eConst0
);
2773 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2774 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2775 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2777 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpLt
),
2780 jmpKind
= Ijk_Boring
;
2784 case 0x01: { /* BGEZ rs, offset */
2785 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2786 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2788 assign(tmp
, eConst0
);
2789 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2790 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2791 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2792 eCond
= binop(mkSzOp(ty
, Iop_CmpEQ8
), mkexpr(tmpLt
),
2795 jmpKind
= Ijk_Boring
;
2799 case 0x11: { /* BGEZAL rs, offset */
2800 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2801 putIReg(regLnk
, eNia
);
2802 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2804 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), eConst0
));
2805 assign(tmpLt
, mode64
? unop(Iop_1Uto64
, mkexpr(tmpLtRes
)) :
2806 unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2808 eCond
= binop(mkSzOp(ty
, Iop_CmpEQ8
), mkexpr(tmpLt
),
2815 case 0x10: { /* BLTZAL rs, offset */
2816 IRTemp tmpLtRes
= newTemp(Ity_I1
);
2817 IRTemp tmpRes
= newTemp(ty
);
2819 addrTgt
= mkSzAddr(ty
, cia
+ 4 + (sOffset
<< 2));
2820 putIReg(regLnk
, eNia
);
2822 assign(tmp
, eConst0
);
2823 assign(tmpLtRes
, binop(opSlt
, mkexpr(tmpRs
), mkexpr(tmp
)));
2824 assign(tmpRes
, mode64
? unop(Iop_1Uto64
,
2825 mkexpr(tmpLtRes
)) : unop(Iop_1Uto32
, mkexpr(tmpLtRes
)));
2826 eCond
= binop(mkSzOp(ty
, Iop_CmpNE8
), mkexpr(tmpRes
),
2841 *set
= IRStmt_Exit(eCond
, jmpKind
, mkSzConst(ty
, addrTgt
), OFFB_PC
);
2845 /*********************************************************/
2846 /*--- Cavium Specific Instructions ---*/
2847 /*********************************************************/
2849 /* Convenience function to yield to thread scheduler */
2850 static void jump_back(IRExpr
*condition
)
2852 stmt( IRStmt_Exit(condition
,
2854 IRConst_U64( guest_PC_curr_instr
),
2858 /* Based on s390_irgen_load_and_add32. */
2859 static void mips_load_store32(IRTemp op1addr
, IRTemp new_val
,
2860 IRTemp expd
, UChar rd
, Bool putIntoRd
)
2863 IRTemp old_mem
= newTemp(Ity_I32
);
2864 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2866 cas
= mkIRCAS(IRTemp_INVALID
, old_mem
,
2867 #if defined (_MIPSEL)
2868 Iend_LE
, mkexpr(op1addr
),
2870 Iend_BE
, mkexpr(op1addr
),
2872 NULL
, mkexpr(expd
), /* expected value */
2873 NULL
, mkexpr(new_val
) /* new value */);
2874 stmt(IRStmt_CAS(cas
));
2876 /* If old_mem contains the expected value, then the CAS succeeded.
2877 Otherwise, it did not */
2878 jump_back(binop(Iop_CmpNE32
, mkexpr(old_mem
), mkexpr(expd
)));
2881 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(old_mem
), True
));
2884 /* Based on s390_irgen_load_and_add64. */
2885 static void mips_load_store64(IRTemp op1addr
, IRTemp new_val
,
2886 IRTemp expd
, UChar rd
, Bool putIntoRd
)
2889 IRTemp old_mem
= newTemp(Ity_I64
);
2891 cas
= mkIRCAS(IRTemp_INVALID
, old_mem
,
2892 #if defined (_MIPSEL)
2893 Iend_LE
, mkexpr(op1addr
),
2895 Iend_BE
, mkexpr(op1addr
),
2897 NULL
, mkexpr(expd
), /* expected value */
2898 NULL
, mkexpr(new_val
) /* new value */);
2899 stmt(IRStmt_CAS(cas
));
2901 /* If old_mem contains the expected value, then the CAS succeeded.
2902 Otherwise, it did not */
2903 jump_back(binop(Iop_CmpNE64
, mkexpr(old_mem
), mkexpr(expd
)));
2906 putIReg(rd
, mkexpr(old_mem
));
2909 static Bool
dis_instr_CVM ( UInt theInstr
)
2911 UChar opc2
= get_function(theInstr
);
2912 UChar opc1
= get_opcode(theInstr
);
2913 UChar regRs
= get_rs(theInstr
);
2914 UChar regRt
= get_rt(theInstr
);
2915 UChar regRd
= get_rd(theInstr
);
2916 /* MIPS trap instructions extract code from theInstr[15:6].
2917 Cavium OCTEON instructions SNEI, SEQI extract immediate operands
2918 from the same bit field [15:6]. */
2919 UInt imm
= get_code(theInstr
);
2920 UChar lenM1
= get_msb(theInstr
);
2921 UChar p
= get_lsb(theInstr
);
2922 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
2923 IRTemp tmp
= newTemp(ty
);
2924 IRTemp tmpRs
= newTemp(ty
);
2925 IRTemp tmpRt
= newTemp(ty
);
2926 IRTemp t1
= newTemp(ty
);
2928 assign(tmpRs
, getIReg(regRs
));
2933 case 0x03: { /* DMUL rd, rs, rt */
2934 DIP("dmul r%u, r%u, r%u", regRd
, regRs
, regRt
);
2935 IRTemp t0
= newTemp(Ity_I128
);
2936 assign(t0
, binop(Iop_MullU64
, getIReg(regRs
), getIReg(regRt
)));
2937 putIReg(regRd
, unop(Iop_128to64
, mkexpr(t0
)));
2941 case 0x18: { /* Store Atomic Add Word - SAA; Cavium OCTEON */
2942 DIP("saa r%u, (r%u)", regRt
, regRs
);
2943 IRTemp addr
= newTemp(Ity_I64
);
2944 IRTemp new_val
= newTemp(Ity_I32
);
2945 IRTemp old
= newTemp(Ity_I32
);
2946 assign(addr
, getIReg(regRs
));
2947 assign(old
, load(Ity_I32
, mkexpr(addr
)));
2948 assign(new_val
, binop(Iop_Add32
,
2950 mkNarrowTo32(ty
, getIReg(regRt
))));
2951 mips_load_store32(addr
, new_val
, old
, 0, False
);
2955 /* Store Atomic Add Doubleword - SAAD; Cavium OCTEON */
2957 DIP( "saad r%u, (r%u)", regRt
, regRs
);
2958 IRTemp addr
= newTemp(Ity_I64
);
2959 IRTemp new_val
= newTemp(Ity_I64
);
2960 IRTemp old
= newTemp(Ity_I64
);
2961 assign(addr
, getIReg(regRs
));
2962 assign(old
, load(Ity_I64
, mkexpr(addr
)));
2963 assign(new_val
, binop(Iop_Add64
,
2966 mips_load_store64(addr
, new_val
, old
, 0, False
);
2970 /* LAI, LAID, LAD, LADD, LAS, LASD,
2971 LAC, LACD, LAA, LAAD, LAW, LAWD */
2973 UInt opc3
= get_sa(theInstr
);
2974 IRTemp addr
= newTemp(Ity_I64
);
2977 /* Load Atomic Increment Word - LAI; Cavium OCTEON2 */
2979 DIP("lai r%u,(r%u)\n", regRd
, regRs
);
2980 IRTemp new_val
= newTemp(Ity_I32
);
2981 IRTemp old
= newTemp(Ity_I32
);
2982 assign(addr
, getIReg(regRs
));
2983 assign(old
, load(Ity_I32
, mkexpr(addr
)));
2984 assign(new_val
, binop(Iop_Add32
,
2987 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
2991 /* Load Atomic Increment Doubleword - LAID; Cavium OCTEON2 */
2993 DIP("laid r%u,(r%u)\n", regRd
, regRs
);
2994 IRTemp new_val
= newTemp(Ity_I64
);
2995 IRTemp old
= newTemp(Ity_I64
);
2996 assign(addr
, getIReg(regRs
));
2997 assign(old
, load(Ity_I64
, mkexpr(addr
)));
2998 assign(new_val
, binop(Iop_Add64
,
3001 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3005 /* Load Atomic Decrement Word - LAD; Cavium OCTEON2 */
3007 DIP("lad r%u,(r%u)\n", regRd
, regRs
);
3008 IRTemp new_val
= newTemp(Ity_I32
);
3009 IRTemp old
= newTemp(Ity_I32
);
3010 assign(addr
, getIReg(regRs
));
3011 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3012 assign(new_val
, binop(Iop_Sub32
,
3015 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3019 /* Load Atomic Decrement Doubleword - LADD; Cavium OCTEON2 */
3021 DIP("ladd r%u,(r%u)\n", regRd
, regRs
);
3022 IRTemp new_val
= newTemp(Ity_I64
);
3023 IRTemp old
= newTemp(Ity_I64
);
3024 assign(addr
, getIReg(regRs
));
3025 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3026 assign(new_val
, binop(Iop_Sub64
,
3029 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3033 /* Load Atomic Set Word - LAS; Cavium OCTEON2 */
3035 DIP("las r%u,(r%u)\n", regRd
, regRs
);
3036 IRTemp new_val
= newTemp(Ity_I32
);
3037 IRTemp old
= newTemp(Ity_I32
);
3038 assign(addr
, getIReg(regRs
));
3039 assign(new_val
, mkU32(0xffffffff));
3040 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3041 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3045 /* Load Atomic Set Doubleword - LASD; Cavium OCTEON2 */
3047 DIP("lasd r%u,(r%u)\n", regRd
, regRs
);
3048 IRTemp new_val
= newTemp(Ity_I64
);
3049 IRTemp old
= newTemp(Ity_I64
);
3050 assign(addr
, getIReg(regRs
));
3051 assign(new_val
, mkU64(0xffffffffffffffffULL
));
3052 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3053 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3057 /* Load Atomic Clear Word - LAC; Cavium OCTEON2 */
3059 DIP("lac r%u,(r%u)\n", regRd
, regRs
);
3060 IRTemp new_val
= newTemp(Ity_I32
);
3061 IRTemp old
= newTemp(Ity_I32
);
3062 assign(addr
, getIReg(regRs
));
3063 assign(new_val
, mkU32(0));
3064 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3065 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3069 /* Load Atomic Clear Doubleword - LACD; Cavium OCTEON2 */
3071 DIP("lacd r%u,(r%u)\n", regRd
, regRs
);
3072 IRTemp new_val
= newTemp(Ity_I64
);
3073 IRTemp old
= newTemp(Ity_I64
);
3074 assign(addr
, getIReg(regRs
));
3075 assign(new_val
, mkU64(0));
3076 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3077 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3081 /* Load Atomic Add Word - LAA; Cavium OCTEON2 */
3083 DIP("laa r%u,(r%u),r%u\n", regRd
, regRs
, regRt
);
3084 IRTemp new_val
= newTemp(Ity_I32
);
3085 IRTemp old
= newTemp(Ity_I32
);
3086 assign(addr
, getIReg(regRs
));
3087 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3088 assign(new_val
, binop(Iop_Add32
,
3090 mkNarrowTo32(ty
, getIReg(regRt
))));
3091 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3095 /* Load Atomic Add Doubleword - LAAD; Cavium OCTEON2 */
3097 DIP("laad r%u,(r%u),r%u\n", regRd
, regRs
, regRt
);
3098 IRTemp new_val
= newTemp(Ity_I64
);
3099 IRTemp old
= newTemp(Ity_I64
);
3100 assign(addr
, getIReg(regRs
));
3101 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3102 assign(new_val
, binop(Iop_Add64
,
3103 load(Ity_I64
, mkexpr(addr
)),
3105 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3109 /* Load Atomic Swap Word - LAW; Cavium OCTEON2 */
3111 DIP("law r%u,(r%u)\n", regRd
, regRs
);
3112 IRTemp new_val
= newTemp(Ity_I32
);
3113 IRTemp old
= newTemp(Ity_I32
);
3114 assign(addr
, getIReg(regRs
));
3115 assign(new_val
, mkNarrowTo32(ty
, getIReg(regRt
)));
3116 assign(old
, load(Ity_I32
, mkexpr(addr
)));
3117 mips_load_store32(addr
, new_val
, old
, regRd
, True
);
3121 /* Load Atomic Swap Doubleword - LAWD; Cavium OCTEON2 */
3123 DIP("lawd r%u,(r%u)\n", regRd
, regRs
);
3124 IRTemp new_val
= newTemp(Ity_I64
);
3125 IRTemp old
= newTemp(Ity_I64
);
3126 assign(addr
, getIReg(regRs
));
3127 assign(new_val
, getIReg(regRt
));
3128 assign(old
, load(Ity_I64
, mkexpr(addr
)));
3129 mips_load_store64(addr
, new_val
, old
, regRd
, True
);
3134 vex_printf("Unknown laxx instruction, opc3=0x%x\n", opc3
);
3135 vex_printf("Instruction=0x%08x\n", theInstr
);
3142 /* Unsigned Byte Add - BADDU rd, rs, rt; Cavium OCTEON */
3144 DIP("BADDU r%u, r%u, r%u", regRs
, regRt
, regRd
);
3145 IRTemp t0
= newTemp(Ity_I8
);
3147 assign(t0
, binop(Iop_Add8
,
3148 mkNarrowTo8(ty
, getIReg(regRs
)),
3149 mkNarrowTo8(ty
, getIReg(regRt
))));
3152 putIReg(regRd
, binop(mkSzOp(ty
, Iop_And8
),
3153 unop(Iop_8Uto64
, mkexpr(t0
)),
3154 mkSzImm(ty
, 0xFF)));
3156 putIReg(regRd
, binop(mkSzOp(ty
, Iop_And8
),
3157 unop(Iop_8Uto32
, mkexpr(t0
)),
3158 mkSzImm(ty
, 0xFF)));
3163 case 0x2c: { /* Count Ones in a Word - POP; Cavium OCTEON */
3166 IRTemp old
= newTemp(ty
);
3167 IRTemp nyu
= IRTemp_INVALID
;
3168 assign(old
, getIReg(regRs
));
3169 DIP("pop r%u, r%u", regRd
, regRs
);
3171 for (i
= 0; i
< 5; i
++) {
3172 mask
[i
] = newTemp(ty
);
3177 assign(mask
[0], mkU64(0x0000000055555555));
3178 assign(mask
[1], mkU64(0x0000000033333333));
3179 assign(mask
[2], mkU64(0x000000000F0F0F0F));
3180 assign(mask
[3], mkU64(0x0000000000FF00FF));
3181 assign(mask
[4], mkU64(0x000000000000FFFF));
3183 for (i
= 0; i
< 5; i
++) {
3188 mkexpr(old
), mkexpr(mask
[i
])),
3191 mkexpr(old
), mkU8(shift
[i
])),
3196 assign(mask
[0], mkU32(0x55555555));
3197 assign(mask
[1], mkU32(0x33333333));
3198 assign(mask
[2], mkU32(0x0F0F0F0F));
3199 assign(mask
[3], mkU32(0x00FF00FF));
3200 assign(mask
[4], mkU32(0x0000FFFF));
3201 assign(old
, getIReg(regRs
));
3203 for (i
= 0; i
< 5; i
++) {
3208 mkexpr(old
), mkexpr(mask
[i
])),
3211 mkexpr(old
), mkU8(shift
[i
])),
3217 putIReg(regRd
, mkexpr(nyu
));
3221 /* Count Ones in a Doubleword - DPOP; Cavium OCTEON */
3225 IRTemp old
= newTemp(ty
);
3226 IRTemp nyu
= IRTemp_INVALID
;
3227 DIP("dpop r%u, r%u", regRd
, regRs
);
3229 for (i
= 0; i
< 6; i
++) {
3230 mask
[i
] = newTemp(ty
);
3234 vassert(mode64
); /*Caution! Only for Mode 64*/
3235 assign(mask
[0], mkU64(0x5555555555555555ULL
));
3236 assign(mask
[1], mkU64(0x3333333333333333ULL
));
3237 assign(mask
[2], mkU64(0x0F0F0F0F0F0F0F0FULL
));
3238 assign(mask
[3], mkU64(0x00FF00FF00FF00FFULL
));
3239 assign(mask
[4], mkU64(0x0000FFFF0000FFFFULL
));
3240 assign(mask
[5], mkU64(0x00000000FFFFFFFFULL
));
3241 assign(old
, getIReg(regRs
));
3243 for (i
= 0; i
< 6; i
++) {
3244 nyu
= newTemp(Ity_I64
);
3248 mkexpr(old
), mkexpr(mask
[i
])),
3251 mkexpr(old
), mkU8(shift
[i
])),
3256 putIReg(regRd
, mkexpr(nyu
));
3260 case 0x32: /* 5. CINS rd, rs, p, lenm1 */
3261 DIP("cins r%u, r%u, %u, %u\n", regRt
, regRs
, p
, lenM1
);
3262 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3263 mkU8(64 - ( lenM1
+ 1 ))));
3264 assign ( tmpRt
, binop(Iop_Shr64
, mkexpr( tmp
),
3265 mkU8(64 - (p
+ lenM1
+ 1))));
3266 putIReg( regRt
, mkexpr(tmpRt
));
3269 case 0x33: /* 6. CINS32 rd, rs, p+32, lenm1 */
3270 DIP("cins32 r%u, r%u, %d, %d\n", regRt
, regRs
, p
+ 32, lenM1
);
3271 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3272 mkU8(64 - ( lenM1
+ 1 ))));
3273 assign ( tmpRt
, binop(Iop_Shr64
, mkexpr( tmp
),
3274 mkU8(32 - (p
+ lenM1
+ 1))));
3275 putIReg( regRt
, mkexpr(tmpRt
));
3278 case 0x3A: /* 3. EXTS rt, rs, p len */
3279 DIP("exts r%u, r%u, %d, %d\n", regRt
, regRs
, p
, lenM1
);
3280 size
= lenM1
+ 1; /* lenm1+1 */
3281 UChar lsAmt
= 64 - (p
+ size
); /* p+lenm1+1 */
3282 UChar rsAmt
= 64 - size
; /* lenm1+1 */
3283 tmp
= newTemp(Ity_I64
);
3284 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
3285 putIReg(regRt
, binop(Iop_Sar64
, mkexpr(tmp
), mkU8(rsAmt
)));
3288 case 0x3B: /* 4. EXTS32 rt, rs, p len */
3289 DIP("exts32 r%u, r%u, %d, %d\n", regRt
, regRs
, p
, lenM1
);
3290 assign ( tmp
, binop(Iop_Shl64
, mkexpr(tmpRs
),
3291 mkU8(32 - (p
+ lenM1
+ 1))));
3292 assign ( tmpRt
, binop(Iop_Sar64
, mkexpr(tmp
),
3293 mkU8(64 - (lenM1
+ 1))) );
3294 putIReg( regRt
, mkexpr(tmpRt
));
3297 case 0x2B: /* 20. SNE rd, rs, rt */
3298 DIP("sne r%u, r%u, r%u", regRd
, regRs
, regRt
);
3301 putIReg(regRd
, unop(Iop_1Uto64
, binop(Iop_CmpNE64
,
3305 putIReg(regRd
, unop(Iop_1Uto32
, binop(Iop_CmpNE32
,
3311 case 0x2A: /* Set Equals - SEQ; Cavium OCTEON */
3312 DIP("seq r%u, r%u, %d", regRd
, regRs
, regRt
);
3315 putIReg(regRd
, unop(Iop_1Uto64
,
3316 binop(Iop_CmpEQ64
, getIReg(regRs
),
3319 putIReg(regRd
, unop(Iop_1Uto32
,
3320 binop(Iop_CmpEQ32
, getIReg(regRs
),
3325 case 0x2E: /* Set Equals Immediate - SEQI; Cavium OCTEON */
3326 DIP("seqi r%u, r%u, %u", regRt
, regRs
, imm
);
3329 putIReg(regRt
, unop(Iop_1Uto64
,
3330 binop(Iop_CmpEQ64
, getIReg(regRs
),
3331 mkU64(extend_s_10to64(imm
)))));
3333 putIReg(regRt
, unop(Iop_1Uto32
,
3334 binop(Iop_CmpEQ32
, getIReg(regRs
),
3335 mkU32(extend_s_10to32(imm
)))));
3339 case 0x2F: /* Set Not Equals Immediate - SNEI; Cavium OCTEON */
3340 DIP("snei r%u, r%u, %u", regRt
, regRs
, imm
);
3343 putIReg(regRt
, unop(Iop_1Uto64
,
3346 mkU64(extend_s_10to64(imm
)))));
3348 putIReg(regRt
, unop(Iop_1Uto32
,
3351 mkU32(extend_s_10to32(imm
)))));
3360 } /* opc1 0x1C ends here*/
3364 case 0x0A: { // lx - Load indexed instructions
3365 switch (get_sa(theInstr
)) {
3366 case 0x00: { // LWX rd, index(base)
3367 DIP("lwx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3368 LOADX_STORE_PATTERN
;
3369 putIReg(regRd
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)),
3374 case 0x04: // LHX rd, index(base)
3375 DIP("lhx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3376 LOADX_STORE_PATTERN
;
3379 putIReg(regRd
, unop(Iop_16Sto64
, load(Ity_I16
,
3382 putIReg(regRd
, unop(Iop_16Sto32
, load(Ity_I16
,
3387 case 0x08: { // LDX rd, index(base)
3388 DIP("ldx r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3389 vassert(mode64
); /* Currently Implemented only for n64 */
3390 LOADX_STORE_PATTERN
;
3391 putIReg(regRd
, load(Ity_I64
, mkexpr(t1
)));
3395 case 0x06: { // LBUX rd, index(base)
3396 DIP("lbux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3397 LOADX_STORE_PATTERN
;
3400 putIReg(regRd
, unop(Iop_8Uto64
, load(Ity_I8
,
3403 putIReg(regRd
, unop(Iop_8Uto32
, load(Ity_I8
,
3409 case 0x10: { // LWUX rd, index(base) (Cavium OCTEON)
3410 DIP("lwux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3411 LOADX_STORE_PATTERN
; /* same for both 32 and 64 modes*/
3412 putIReg(regRd
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)),
3417 case 0x14: { // LHUX rd, index(base) (Cavium OCTEON)
3418 DIP("lhux r%u, r%u(r%u)", regRd
, regRt
, regRs
);
3419 LOADX_STORE_PATTERN
;
3423 unop(Iop_16Uto64
, load(Ity_I16
, mkexpr(t1
))));
3426 unop(Iop_16Uto32
, load(Ity_I16
, mkexpr(t1
))));
3431 case 0x16: { // LBX rd, index(base) (Cavium OCTEON)
3432 DIP("lbx r%u, r%u(r%u)", regRd
, regRs
, regRt
);
3433 LOADX_STORE_PATTERN
;
3437 unop(Iop_8Sto64
, load(Ity_I8
, mkexpr(t1
))));
3440 unop(Iop_8Sto32
, load(Ity_I8
, mkexpr(t1
))));
3446 vex_printf("\nUnhandled LX instruction opc3 = %x\n",
3453 } /* opc1 = 0x1F & opc2 = 0xA (LX) ends here*/
3456 } /* opc1 = 0x1F ends here*/
3460 } /* main opc1 switch ends here */
3465 static Int
msa_I8_logical(UInt cins
, UChar wd
, UChar ws
)
3471 operation
= (cins
>> 24) & 3;
3472 i8
= (cins
& 0x00FF0000) >> 16;
3474 switch (operation
) {
3475 case 0x00: { /* ANDI.B */
3476 DIP("ANDI.B w%d, w%d, %d", wd
, ws
, i8
);
3477 t1
= newTemp(Ity_V128
);
3478 t2
= newTemp(Ity_V128
);
3480 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3481 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3483 assign(t1
, getWReg(ws
));
3484 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3485 putWReg(wd
, binop(Iop_AndV128
, mkexpr(t1
), mkexpr(t2
)));
3489 case 0x01: { /* ORI.B */
3490 DIP("ORI.B w%d, w%d, %d", wd
, ws
, i8
);
3491 t1
= newTemp(Ity_V128
);
3492 t2
= newTemp(Ity_V128
);
3494 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3495 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3497 assign(t1
, getWReg(ws
));
3498 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3499 putWReg(wd
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3503 case 0x02: { /* NORI.B */
3504 DIP("NORI.B w%d, w%d, %d", wd
, ws
, i8
);
3505 t1
= newTemp(Ity_V128
);
3506 t2
= newTemp(Ity_V128
);
3508 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3509 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3511 assign(t1
, getWReg(ws
));
3512 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3513 putWReg(wd
, unop(Iop_NotV128
, binop(Iop_OrV128
,
3514 mkexpr(t1
), mkexpr(t2
))));
3518 case 0x03: { /* XORI.B */
3519 DIP("XORI.B w%d, w%d, %d", wd
, ws
, i8
);
3520 t1
= newTemp(Ity_V128
);
3521 t2
= newTemp(Ity_V128
);
3523 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3524 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3526 assign(t1
, getWReg(ws
));
3527 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3528 putWReg(wd
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
3539 static Int
msa_I8_branch(UInt cins
, UChar wd
, UChar ws
)
3541 IRTemp t1
, t2
, t3
, t4
;
3545 operation
= (cins
>> 24) & 3;
3546 i8
= (cins
& 0x00FF0000) >> 16;
3548 switch (operation
) {
3549 case 0x00: { /* BMNZI.B */
3550 DIP("BMNZI.B w%d, w%d, %d", wd
, ws
, i8
);
3551 t1
= newTemp(Ity_V128
);
3552 t2
= newTemp(Ity_V128
);
3553 t3
= newTemp(Ity_V128
);
3554 t4
= newTemp(Ity_V128
);
3556 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3557 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3559 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3560 assign(t1
, binop(Iop_AndV128
, getWReg(ws
), mkexpr(t4
)));
3561 assign(t2
, binop(Iop_AndV128
, getWReg(wd
),
3562 unop(Iop_NotV128
, mkexpr(t4
))));
3563 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3564 putWReg(wd
, mkexpr(t3
));
3568 case 0x01: { /* BMZI.B */
3569 DIP("BMZI.B w%d, w%d, %d", wd
, ws
, i8
);
3570 t1
= newTemp(Ity_V128
);
3571 t2
= newTemp(Ity_V128
);
3572 t3
= newTemp(Ity_V128
);
3573 t4
= newTemp(Ity_V128
);
3575 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3576 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3578 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3579 assign(t1
, binop(Iop_AndV128
, getWReg(wd
), mkexpr(t4
)));
3580 assign(t2
, binop(Iop_AndV128
, getWReg(ws
),
3581 unop(Iop_NotV128
, mkexpr(t4
))));
3582 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3583 putWReg(wd
, mkexpr(t3
));
3587 case 0x02: { /* BSELI.B */
3588 DIP("BSELI.B w%d, w%d, %d", wd
, ws
, i8
);
3589 t1
= newTemp(Ity_V128
);
3590 t2
= newTemp(Ity_V128
);
3591 t3
= newTemp(Ity_V128
);
3592 t4
= newTemp(Ity_V128
);
3594 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3595 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3597 assign(t4
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3598 assign(t1
, binop(Iop_AndV128
, getWReg(wd
), mkexpr(t4
)));
3599 assign(t2
, binop(Iop_AndV128
, getWReg(ws
),
3600 unop(Iop_NotV128
, getWReg(wd
))));
3601 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
3602 putWReg(wd
, mkexpr(t3
));
3613 static Int
msa_I8_shift(UInt cins
, UChar wd
, UChar ws
)
3619 operation
= (cins
>> 24) & 3;
3620 i8
= (cins
& 0x00FF0000) >> 16;
3622 switch (operation
) {
3623 case 0x00: { /* SHF.B */
3624 DIP("SHF.B w%d, w%d, %d", wd
, ws
, i8
);
3625 t1
= newTemp(Ity_V128
);
3626 t2
= newTemp(Ity_V128
);
3627 assign(t1
, getWReg(wd
));
3628 assign(t2
, getWReg(ws
));
3632 for (i
= 0; i
< 16; i
++) {
3633 tmp
[i
] = newTemp(Ity_I8
);
3635 binop(Iop_GetElem8x16
, mkexpr(t2
),
3637 ((i8
>> (i
% 4) * 2) & 0x03))));
3640 putWReg(wd
, binop(Iop_64HLtoV128
,
3670 mkexpr(tmp
[0]))))));
3674 case 0x01: { /* SHF.H */
3675 DIP("SHF.H w%d, w%d, %d", wd
, ws
, i8
);
3676 t1
= newTemp(Ity_V128
);
3677 t2
= newTemp(Ity_V128
);
3678 assign(t1
, getWReg(wd
));
3679 assign(t2
, getWReg(ws
));
3683 for (i
= 0; i
< 8; i
++) {
3684 tmp
[i
] = newTemp(Ity_I16
);
3686 binop(Iop_GetElem16x8
, mkexpr(t2
),
3688 ((i8
>> (i
% 4) * 2) & 0x03))));
3691 putWReg(wd
, binop(Iop_64HLtoV128
,
3694 mkexpr(tmp
[7]), mkexpr(tmp
[6])),
3696 mkexpr(tmp
[5]), mkexpr(tmp
[4]))),
3699 mkexpr(tmp
[3]), mkexpr(tmp
[2])),
3701 mkexpr(tmp
[1]), mkexpr(tmp
[0])))));
3705 case 0x02: { /* SHF.W */
3706 DIP("SHF.W w%d, w%d, %d", wd
, ws
, i8
);
3707 t1
= newTemp(Ity_V128
);
3708 t2
= newTemp(Ity_V128
);
3709 assign(t1
, getWReg(wd
));
3710 assign(t2
, getWReg(ws
));
3714 for (i
= 0; i
< 4; i
++) {
3715 tmp
[i
] = newTemp(Ity_I32
);
3717 binop(Iop_GetElem32x4
, mkexpr(t2
),
3719 ((i8
>> (i
% 4) * 2) & 0x03))));
3722 putWReg(wd
, binop(Iop_64HLtoV128
,
3724 mkexpr(tmp
[3]), mkexpr(tmp
[2])),
3726 mkexpr(tmp
[1]), mkexpr(tmp
[0]))));
3737 static Int
msa_I5_06(UInt cins
, UChar wd
, UChar ws
) /* I5 (0x06) */
3743 operation
= (cins
& 0x03800000) >> 23;
3744 df
= (cins
& 0x00600000) >> 21;
3745 wt
= (cins
& 0x001F0000) >> 16;
3747 switch (operation
) {
3748 case 0x00: { /* ADDVI */
3752 case 0x00: { /* ADDVI.B */
3753 DIP("ADDVI.B w%d, w%d, %d", wd
, ws
, wt
);
3754 t1
= newTemp(Ity_V128
);
3755 t2
= newTemp(Ity_V128
);
3756 t3
= newTemp(Ity_V128
);
3757 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3758 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3760 assign(t1
, getWReg(ws
));
3761 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3762 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
3763 putWReg(wd
, mkexpr(t3
));
3767 case 0x01: { /* ADDVI.H */
3768 DIP("ADDVI.H w%d, w%d, %d", wd
, ws
, wt
);
3769 t1
= newTemp(Ity_V128
);
3770 t2
= newTemp(Ity_V128
);
3771 t3
= newTemp(Ity_V128
);
3772 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3773 assign(t1
, getWReg(ws
));
3774 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3775 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
3776 putWReg(wd
, mkexpr(t3
));
3780 case 0x02: { /* ADDVI.W */
3781 DIP("ADDVI.W w%d, w%d, %d", wd
, ws
, wt
);
3782 t1
= newTemp(Ity_V128
);
3783 t2
= newTemp(Ity_V128
);
3784 t3
= newTemp(Ity_V128
);
3786 assign(t1
, getWReg(ws
));
3787 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3788 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
3789 putWReg(wd
, mkexpr(t3
));
3793 case 0x03: { /* ADDVI.D */
3794 DIP("ADDVI.D w%d, w%d, %d", wd
, ws
, wt
);
3795 t1
= newTemp(Ity_V128
);
3796 t2
= newTemp(Ity_V128
);
3797 t3
= newTemp(Ity_V128
);
3798 assign(t1
, getWReg(ws
));
3799 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3800 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
3801 putWReg(wd
, mkexpr(t3
));
3809 case 0x01: { /* SUBVI */
3813 case 0x00: { /* SUBVI.B */
3814 DIP("SUBVI.B w%d, w%d, %d", wd
, ws
, wt
);
3815 t1
= newTemp(Ity_V128
);
3816 t2
= newTemp(Ity_V128
);
3817 t3
= newTemp(Ity_V128
);
3818 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3819 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3821 assign(t1
, getWReg(ws
));
3822 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3823 assign(t3
, binop(Iop_Sub8x16
, mkexpr(t1
), mkexpr(t2
)));
3824 putWReg(wd
, mkexpr(t3
));
3828 case 0x01: { /* SUBVI.H */
3829 DIP("SUBVI.H w%d, w%d, %d", wd
, ws
, wt
);
3830 t1
= newTemp(Ity_V128
);
3831 t2
= newTemp(Ity_V128
);
3832 t3
= newTemp(Ity_V128
);
3833 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3834 assign(t1
, getWReg(ws
));
3835 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3836 assign(t3
, binop(Iop_Sub16x8
, mkexpr(t1
), mkexpr(t2
)));
3837 putWReg(wd
, mkexpr(t3
));
3841 case 0x02: { /* SUBVI.W */
3842 DIP("SUBVI.W w%d, w%d, %d", wd
, ws
, wt
);
3843 t1
= newTemp(Ity_V128
);
3844 t2
= newTemp(Ity_V128
);
3845 t3
= newTemp(Ity_V128
);
3847 assign(t1
, getWReg(ws
));
3848 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3849 assign(t3
, binop(Iop_Sub32x4
, mkexpr(t1
), mkexpr(t2
)));
3850 putWReg(wd
, mkexpr(t3
));
3854 case 0x03: { /* SUBVI.D */
3855 DIP("SUBVI.D w%d, w%d, %d", wd
, ws
, wt
);
3856 t1
= newTemp(Ity_V128
);
3857 t2
= newTemp(Ity_V128
);
3858 t3
= newTemp(Ity_V128
);
3859 assign(t1
, getWReg(ws
));
3860 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3861 assign(t3
, binop(Iop_Sub64x2
, mkexpr(t1
), mkexpr(t2
)));
3862 putWReg(wd
, mkexpr(t3
));
3870 case 0x02: { /* MAXI_S */
3874 case 0x00: { /* MAXI_S.B */
3875 DIP("MAXI_S.B w%d, w%d, %d", wd
, ws
, wt
);
3876 t1
= newTemp(Ity_V128
);
3877 t2
= newTemp(Ity_V128
);
3878 t3
= newTemp(Ity_V128
);
3879 char stemp
= ((int)tmp
<< 27) >> 27;
3881 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3882 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3884 assign(t1
, getWReg(ws
));
3885 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3886 assign(t3
, binop(Iop_Max8Sx16
, mkexpr(t1
), mkexpr(t2
)));
3887 putWReg(wd
, mkexpr(t3
));
3891 case 0x01: { /* MAXI_S.H */
3892 DIP("MAXI_S.H w%d, w%d, %d", wd
, ws
, wt
);
3893 t1
= newTemp(Ity_V128
);
3894 t2
= newTemp(Ity_V128
);
3895 t3
= newTemp(Ity_V128
);
3896 short stemp
= ((int)tmp
<< 27) >> 27;
3897 tmp
= (UShort
)stemp
;
3898 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3899 assign(t1
, getWReg(ws
));
3900 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3901 assign(t3
, binop(Iop_Max16Sx8
, mkexpr(t1
), mkexpr(t2
)));
3902 putWReg(wd
, mkexpr(t3
));
3906 case 0x02: { /* MAXI_S.W */
3907 DIP("MAXI_S.W w%d, w%d, %d", wd
, ws
, wt
);
3908 t1
= newTemp(Ity_V128
);
3909 t2
= newTemp(Ity_V128
);
3910 t3
= newTemp(Ity_V128
);
3911 int stemp
= ((int)tmp
<< 27) >> 27;
3914 assign(t1
, getWReg(ws
));
3915 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3916 assign(t3
, binop(Iop_Max32Sx4
, mkexpr(t1
), mkexpr(t2
)));
3917 putWReg(wd
, mkexpr(t3
));
3921 case 0x03: { /* MAXI_S.D */
3922 DIP("MAXI_S.D w%d, w%d, %d", wd
, ws
, wt
);
3923 t1
= newTemp(Ity_V128
);
3924 t2
= newTemp(Ity_V128
);
3925 t3
= newTemp(Ity_V128
);
3926 Long stemp
= ((Long
)tmp
<< 59) >> 59;
3928 assign(t1
, getWReg(ws
));
3929 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3930 assign(t3
, binop(Iop_Max64Sx2
, mkexpr(t1
), mkexpr(t2
)));
3931 putWReg(wd
, mkexpr(t3
));
3939 case 0x03: { /* MAXI_U */
3943 case 0x00: { /* MAXI_U.B */
3944 DIP("MAXI_U.B w%d, w%d, %d", wd
, ws
, wt
);
3945 t1
= newTemp(Ity_V128
);
3946 t2
= newTemp(Ity_V128
);
3947 t3
= newTemp(Ity_V128
);
3948 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
3949 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
3951 assign(t1
, getWReg(ws
));
3952 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3953 assign(t3
, binop(Iop_Max8Ux16
, mkexpr(t1
), mkexpr(t2
)));
3954 putWReg(wd
, mkexpr(t3
));
3958 case 0x01: { /* MAXI_U.H */
3959 DIP("MAXI_U.H w%d, w%d, %d", wd
, ws
, wt
);
3960 t1
= newTemp(Ity_V128
);
3961 t2
= newTemp(Ity_V128
);
3962 t3
= newTemp(Ity_V128
);
3963 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
3964 assign(t1
, getWReg(ws
));
3965 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3966 assign(t3
, binop(Iop_Max16Ux8
, mkexpr(t1
), mkexpr(t2
)));
3967 putWReg(wd
, mkexpr(t3
));
3971 case 0x02: { /* MAXI_U.W */
3972 DIP("MAXI_U.W w%d, w%d, %d", wd
, ws
, wt
);
3973 t1
= newTemp(Ity_V128
);
3974 t2
= newTemp(Ity_V128
);
3975 t3
= newTemp(Ity_V128
);
3977 assign(t1
, getWReg(ws
));
3978 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3979 assign(t3
, binop(Iop_Max32Ux4
, mkexpr(t1
), mkexpr(t2
)));
3980 putWReg(wd
, mkexpr(t3
));
3984 case 0x03: { /* MAXI_U.D */
3985 DIP("MAXI_U.D w%d, w%d, %d", wd
, ws
, wt
);
3986 t1
= newTemp(Ity_V128
);
3987 t2
= newTemp(Ity_V128
);
3988 t3
= newTemp(Ity_V128
);
3989 assign(t1
, getWReg(ws
));
3990 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
3991 assign(t3
, binop(Iop_Max64Ux2
, mkexpr(t1
), mkexpr(t2
)));
3992 putWReg(wd
, mkexpr(t3
));
4000 case 0x04: { /* MINI_S */
4004 case 0x00: { /* MINI_S.B */
4005 DIP("MINI_S.B w%d, w%d, %d", wd
, ws
, wt
);
4006 t1
= newTemp(Ity_V128
);
4007 t2
= newTemp(Ity_V128
);
4008 t3
= newTemp(Ity_V128
);
4009 char stemp
= ((int)tmp
<< 27) >> 27;
4011 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4012 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4014 assign(t1
, getWReg(ws
));
4015 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4016 assign(t3
, binop(Iop_Min8Sx16
, mkexpr(t1
), mkexpr(t2
)));
4017 putWReg(wd
, mkexpr(t3
));
4021 case 0x01: { /* MINI_S.H */
4022 DIP("MINI_S.H w%d, w%d, %d", wd
, ws
, wt
);
4023 t1
= newTemp(Ity_V128
);
4024 t2
= newTemp(Ity_V128
);
4025 t3
= newTemp(Ity_V128
);
4026 short stemp
= ((int)tmp
<< 27) >> 27;
4027 tmp
= (UShort
)stemp
;
4028 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4029 assign(t1
, getWReg(ws
));
4030 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4031 assign(t3
, binop(Iop_Min16Sx8
, mkexpr(t1
), mkexpr(t2
)));
4032 putWReg(wd
, mkexpr(t3
));
4036 case 0x02: { /* MINI_S.W */
4037 DIP("MINI_S.W w%d, w%d, %d", wd
, ws
, wt
);
4038 t1
= newTemp(Ity_V128
);
4039 t2
= newTemp(Ity_V128
);
4040 t3
= newTemp(Ity_V128
);
4041 int stemp
= ((int)tmp
<< 27) >> 27;
4044 assign(t1
, getWReg(ws
));
4045 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4046 assign(t3
, binop(Iop_Min32Sx4
, mkexpr(t1
), mkexpr(t2
)));
4047 putWReg(wd
, mkexpr(t3
));
4051 case 0x03: { /* MINI_S.D */
4052 DIP("MINI_S.D w%d, w%d, %d", wd
, ws
, wt
);
4053 t1
= newTemp(Ity_V128
);
4054 t2
= newTemp(Ity_V128
);
4055 t3
= newTemp(Ity_V128
);
4056 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4058 assign(t1
, getWReg(ws
));
4059 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4060 assign(t3
, binop(Iop_Min64Sx2
, mkexpr(t1
), mkexpr(t2
)));
4061 putWReg(wd
, mkexpr(t3
));
4069 case 0x05: { /* MINI_U */
4073 case 0x00: { /* MINI_U.B */
4074 DIP("MINI_U.B w%d, w%d, %d", wd
, ws
, wt
);
4075 t1
= newTemp(Ity_V128
);
4076 t2
= newTemp(Ity_V128
);
4077 t3
= newTemp(Ity_V128
);
4078 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4079 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4081 assign(t1
, getWReg(ws
));
4082 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4083 assign(t3
, binop(Iop_Min8Ux16
, mkexpr(t1
), mkexpr(t2
)));
4084 putWReg(wd
, mkexpr(t3
));
4088 case 0x01: { /* MINI_U.H */
4089 DIP("MINI_U.H w%d, w%d, %d", wd
, ws
, wt
);
4090 t1
= newTemp(Ity_V128
);
4091 t2
= newTemp(Ity_V128
);
4092 t3
= newTemp(Ity_V128
);
4093 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4094 assign(t1
, getWReg(ws
));
4095 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4096 assign(t3
, binop(Iop_Min16Ux8
, mkexpr(t1
), mkexpr(t2
)));
4097 putWReg(wd
, mkexpr(t3
));
4101 case 0x02: { /* MINI_U.W */
4102 DIP("MINI_U.W w%d, w%d, %d", wd
, ws
, wt
);
4103 t1
= newTemp(Ity_V128
);
4104 t2
= newTemp(Ity_V128
);
4105 t3
= newTemp(Ity_V128
);
4107 assign(t1
, getWReg(ws
));
4108 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4109 assign(t3
, binop(Iop_Min32Ux4
, mkexpr(t1
), mkexpr(t2
)));
4110 putWReg(wd
, mkexpr(t3
));
4114 case 0x03: { /* MINI_U.D */
4115 DIP("MINI_U.D w%d, w%d, %d", wd
, ws
, wt
);
4116 t1
= newTemp(Ity_V128
);
4117 t2
= newTemp(Ity_V128
);
4118 t3
= newTemp(Ity_V128
);
4119 assign(t1
, getWReg(ws
));
4120 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4121 assign(t3
, binop(Iop_Min64Ux2
, mkexpr(t1
), mkexpr(t2
)));
4122 putWReg(wd
, mkexpr(t3
));
4138 static Int
msa_I5_07(UInt cins
, UChar wd
, UChar ws
) /* I5 (0x07) / I10 */
4144 operation
= (cins
& 0x03800000) >> 23;
4145 df
= (cins
& 0x00600000) >> 21;
4146 i5
= (cins
& 0x001F0000) >> 16;
4148 switch (operation
) {
4153 case 0x00: { /* CEQI.B */
4154 DIP("CEQI.B w%d, w%d, %d", wd
, ws
, i5
);
4155 t1
= newTemp(Ity_V128
);
4156 t2
= newTemp(Ity_V128
);
4157 t3
= newTemp(Ity_V128
);
4158 char stemp
= ((int)tmp
<< 27) >> 27;
4160 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4161 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4163 assign(t1
, getWReg(ws
));
4164 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4165 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
4166 putWReg(wd
, mkexpr(t3
));
4170 case 0x01: { /* CEQI.H */
4171 DIP("CEQI.H w%d, w%d, %d", wd
, ws
, i5
);
4172 t1
= newTemp(Ity_V128
);
4173 t2
= newTemp(Ity_V128
);
4174 t3
= newTemp(Ity_V128
);
4175 short stemp
= ((int)tmp
<< 27) >> 27;
4176 tmp
= (UShort
)stemp
;
4177 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4178 assign(t1
, getWReg(ws
));
4179 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4180 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
4181 putWReg(wd
, mkexpr(t3
));
4185 case 0x02: { /* CEQI.W */
4186 DIP("CEQI.W w%d, w%d, %d", wd
, ws
, i5
);
4187 t1
= newTemp(Ity_V128
);
4188 t2
= newTemp(Ity_V128
);
4189 t3
= newTemp(Ity_V128
);
4190 int stemp
= ((int)tmp
<< 27) >> 27;
4193 assign(t1
, getWReg(ws
));
4194 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4195 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
4196 putWReg(wd
, mkexpr(t3
));
4200 case 0x03: { /* CEQI.D */
4201 DIP("CEQI.D w%d, w%d, %d", wd
, ws
, i5
);
4202 t1
= newTemp(Ity_V128
);
4203 t2
= newTemp(Ity_V128
);
4204 t3
= newTemp(Ity_V128
);
4205 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4207 assign(t1
, getWReg(ws
));
4208 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4209 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
4210 putWReg(wd
, mkexpr(t3
));
4218 case 0x02: { /* CLTI_S.df */
4222 case 0x00: { /* CLTI_S.B */
4223 DIP("CLTI_S.B w%d, w%d, %d", wd
, ws
, i5
);
4224 t1
= newTemp(Ity_V128
);
4225 t2
= newTemp(Ity_V128
);
4226 t3
= newTemp(Ity_V128
);
4227 char stemp
= ((int)tmp
<< 27) >> 27;
4229 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4230 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4232 assign(t1
, getWReg(ws
));
4233 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4234 assign(t3
, binop(Iop_CmpGT8Sx16
, mkexpr(t2
), mkexpr(t1
)));
4235 putWReg(wd
, mkexpr(t3
));
4239 case 0x01: { /* CLTI_S.H */
4240 DIP("CLTI_S.H w%d, w%d, %d", wd
, ws
, i5
);
4241 t1
= newTemp(Ity_V128
);
4242 t2
= newTemp(Ity_V128
);
4243 t3
= newTemp(Ity_V128
);
4244 short stemp
= ((int)tmp
<< 27) >> 27;
4245 tmp
= (UShort
)stemp
;
4246 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4247 assign(t1
, getWReg(ws
));
4248 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4249 assign(t3
, binop(Iop_CmpGT16Sx8
, mkexpr(t2
), mkexpr(t1
)));
4250 putWReg(wd
, mkexpr(t3
));
4254 case 0x02: { /* CLTI_S.W */
4255 DIP("CLTI_S.W w%d, w%d, %d", wd
, ws
, i5
);
4256 t1
= newTemp(Ity_V128
);
4257 t2
= newTemp(Ity_V128
);
4258 t3
= newTemp(Ity_V128
);
4259 int stemp
= ((int)tmp
<< 27) >> 27;
4262 assign(t1
, getWReg(ws
));
4263 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4264 assign(t3
, binop(Iop_CmpGT32Sx4
, mkexpr(t2
), mkexpr(t1
)));
4265 putWReg(wd
, mkexpr(t3
));
4269 case 0x03: { /* CLTI_S.D */
4270 DIP("CLTI_S.D w%d, w%d, %d", wd
, ws
, i5
);
4271 t1
= newTemp(Ity_V128
);
4272 t2
= newTemp(Ity_V128
);
4273 t3
= newTemp(Ity_V128
);
4274 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4276 assign(t1
, getWReg(ws
));
4277 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4278 assign(t3
, binop(Iop_CmpGT64Sx2
, mkexpr(t2
), mkexpr(t1
)));
4279 putWReg(wd
, mkexpr(t3
));
4290 case 0x03: { /* CLTI_U.df */
4294 case 0x00: { /* CLTI_U.B */
4295 DIP("CLTI_U.B w%d, w%d, %d", wd
, ws
, i5
);
4296 t1
= newTemp(Ity_V128
);
4297 t2
= newTemp(Ity_V128
);
4298 t3
= newTemp(Ity_V128
);
4299 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4300 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4302 assign(t1
, getWReg(ws
));
4303 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4304 assign(t3
, binop(Iop_CmpGT8Ux16
, mkexpr(t2
), mkexpr(t1
)));
4305 putWReg(wd
, mkexpr(t3
));
4309 case 0x01: { /* CLTI_U.H */
4310 DIP("CLTI_U.H w%d, w%d, %d", wd
, ws
, i5
);
4311 t1
= newTemp(Ity_V128
);
4312 t2
= newTemp(Ity_V128
);
4313 t3
= newTemp(Ity_V128
);
4314 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4315 assign(t1
, getWReg(ws
));
4316 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4317 assign(t3
, binop(Iop_CmpGT16Ux8
, mkexpr(t2
), mkexpr(t1
)));
4318 putWReg(wd
, mkexpr(t3
));
4322 case 0x02: { /* CLTI_U.W */
4323 DIP("CLTI_U.W w%d, w%d, %d", wd
, ws
, i5
);
4324 t1
= newTemp(Ity_V128
);
4325 t2
= newTemp(Ity_V128
);
4326 t3
= newTemp(Ity_V128
);
4328 assign(t1
, getWReg(ws
));
4329 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4330 assign(t3
, binop(Iop_CmpGT32Ux4
, mkexpr(t2
), mkexpr(t1
)));
4331 putWReg(wd
, mkexpr(t3
));
4335 case 0x03: { /* CLTI_U.D */
4336 DIP("CLTI_U.D w%d, w%d, %d", wd
, ws
, i5
);
4337 t1
= newTemp(Ity_V128
);
4338 t2
= newTemp(Ity_V128
);
4339 t3
= newTemp(Ity_V128
);
4340 assign(t1
, getWReg(ws
));
4341 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4342 assign(t3
, binop(Iop_CmpGT64Ux2
, mkexpr(t2
), mkexpr(t1
)));
4343 putWReg(wd
, mkexpr(t3
));
4351 case 0x04: { /* CLEI_S.df */
4355 case 0x00: { /* CLEI_S.B */
4356 DIP("CLEI_S.B w%d, w%d, %d", wd
, ws
, i5
);
4357 t1
= newTemp(Ity_V128
);
4358 t2
= newTemp(Ity_V128
);
4359 t3
= newTemp(Ity_V128
);
4360 char stemp
= ((int)tmp
<< 27) >> 27;
4362 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4363 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4365 assign(t1
, getWReg(ws
));
4366 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4367 assign(t3
, binop(Iop_OrV128
, binop(Iop_CmpGT8Sx16
,
4368 mkexpr(t2
), mkexpr(t1
)),
4369 binop(Iop_CmpEQ8x16
,
4370 mkexpr(t1
), mkexpr(t2
))));
4371 putWReg(wd
, mkexpr(t3
));
4375 case 0x01: { /* CLEI_S.H */
4376 DIP("CLEI_S.H w%d, w%d, %d", wd
, ws
, i5
);
4377 t1
= newTemp(Ity_V128
);
4378 t2
= newTemp(Ity_V128
);
4379 t3
= newTemp(Ity_V128
);
4380 short stemp
= ((int)tmp
<< 27) >> 27;
4381 tmp
= (UShort
)stemp
;
4382 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4383 assign(t1
, getWReg(ws
));
4384 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4385 assign(t3
, binop(Iop_OrV128
, binop(Iop_CmpGT16Sx8
,
4386 mkexpr(t2
), mkexpr(t1
)),
4387 binop(Iop_CmpEQ16x8
,
4388 mkexpr(t1
), mkexpr(t2
))));
4389 putWReg(wd
, mkexpr(t3
));
4393 case 0x02: { /* CLEI_S.W */
4394 DIP("CLEI_S.W w%d, w%d, %d", wd
, ws
, i5
);
4395 t1
= newTemp(Ity_V128
);
4396 t2
= newTemp(Ity_V128
);
4397 t3
= newTemp(Ity_V128
);
4398 int stemp
= ((int)tmp
<< 27) >> 27;
4401 assign(t1
, getWReg(ws
));
4402 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4403 assign(t3
, binop(Iop_OrV128
,
4404 binop(Iop_CmpGT32Sx4
,
4405 mkexpr(t2
), mkexpr(t1
)),
4406 binop(Iop_CmpEQ32x4
,
4407 mkexpr(t1
), mkexpr(t2
))));
4408 putWReg(wd
, mkexpr(t3
));
4412 case 0x03: { /* CLEI_S.D */
4413 DIP("CLEI_S.D w%d, w%d, %d", wd
, ws
, i5
);
4414 t1
= newTemp(Ity_V128
);
4415 t2
= newTemp(Ity_V128
);
4416 t3
= newTemp(Ity_V128
);
4417 Long stemp
= ((Long
)tmp
<< 59) >> 59;
4419 assign(t1
, getWReg(ws
));
4420 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4421 assign(t3
, binop(Iop_OrV128
,
4422 binop(Iop_CmpGT64Sx2
,
4423 mkexpr(t2
), mkexpr(t1
)),
4424 binop(Iop_CmpEQ64x2
,
4425 mkexpr(t1
), mkexpr(t2
))));
4426 putWReg(wd
, mkexpr(t3
));
4437 case 0x05: { /* CLEI_U.df */
4441 case 0x00: { /* CLEI_U.B */
4442 DIP("CLEI_U.B w%d, w%d, %d", wd
, ws
, i5
);
4443 t1
= newTemp(Ity_V128
);
4444 t2
= newTemp(Ity_V128
);
4445 t3
= newTemp(Ity_V128
);
4446 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4447 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4449 assign(t1
, getWReg(ws
));
4450 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4451 assign(t3
, binop(Iop_OrV128
,
4452 binop(Iop_CmpGT8Ux16
,
4453 mkexpr(t2
), mkexpr(t1
)),
4454 binop(Iop_CmpEQ8x16
,
4455 mkexpr(t1
), mkexpr(t2
))));
4456 putWReg(wd
, mkexpr(t3
));
4460 case 0x01: { /* CLEI_U.H */
4461 DIP("CLEI_U.H w%d, w%d, %d", wd
, ws
, i5
);
4462 t1
= newTemp(Ity_V128
);
4463 t2
= newTemp(Ity_V128
);
4464 t3
= newTemp(Ity_V128
);
4465 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4466 assign(t1
, getWReg(ws
));
4467 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4468 assign(t3
, binop(Iop_OrV128
,
4469 binop(Iop_CmpGT16Ux8
,
4470 mkexpr(t2
), mkexpr(t1
)),
4471 binop(Iop_CmpEQ16x8
,
4472 mkexpr(t1
), mkexpr(t2
))));
4473 putWReg(wd
, mkexpr(t3
));
4477 case 0x02: { /* CLEI_U.W */
4478 DIP("CLEI_U.W w%d, w%d, %d", wd
, ws
, i5
);
4479 t1
= newTemp(Ity_V128
);
4480 t2
= newTemp(Ity_V128
);
4481 t3
= newTemp(Ity_V128
);
4483 assign(t1
, getWReg(ws
));
4484 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4485 assign(t3
, binop(Iop_OrV128
,
4486 binop(Iop_CmpGT32Ux4
,
4487 mkexpr(t2
), mkexpr(t1
)),
4488 binop(Iop_CmpEQ32x4
,
4489 mkexpr(t1
), mkexpr(t2
))));
4490 putWReg(wd
, mkexpr(t3
));
4494 case 0x03: { /* CLEI_U.D */
4495 DIP("CLEI_U.D w%d, w%d, %d", wd
, ws
, i5
);
4496 t1
= newTemp(Ity_V128
);
4497 t2
= newTemp(Ity_V128
);
4498 t3
= newTemp(Ity_V128
);
4499 assign(t1
, getWReg(ws
));
4500 assign(t2
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4501 assign(t3
, binop(Iop_OrV128
,
4502 binop(Iop_CmpGT64Ux2
,
4503 mkexpr(t2
), mkexpr(t1
)),
4504 binop(Iop_CmpEQ64x2
,
4505 mkexpr(t1
), mkexpr(t2
))));
4506 putWReg(wd
, mkexpr(t3
));
4514 case 0x06: { /* LDI.df */
4517 s10
= (cins
& 0x001FF800) >> 11;
4520 case 0x00: /* LDI.B */
4521 DIP("LDI.B w%d, %d", wd
, s10
);
4523 tmp
= tmp
| (tmp
<< 8) | (tmp
<< 16) | (tmp
<< 24)
4524 | (tmp
<< 32) | (tmp
<< 40) | (tmp
<< 48) |
4528 case 0x01: /* LDI.H */
4529 DIP("LDI.H w%d, %d", wd
, s10
);
4530 tmp
= extend_s_10to16(s10
);
4531 tmp
= tmp
| (tmp
<< 16) | (tmp
<< 32) | (tmp
<< 48);
4534 case 0x02: /* LDI.W */
4535 DIP("LDI.W w%d, %d", wd
, s10
);
4536 tmp
= extend_s_10to32(s10
);
4537 tmp
= tmp
| (tmp
<< 32);
4540 case 0x03: /* LDI.D */
4541 DIP("LDI.D w%d, %d", wd
, s10
);
4542 tmp
= extend_s_10to64(s10
);
4549 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(tmp
), mkU64(tmp
)));
4560 static Int
msa_BIT_09(UInt cins
, UChar wd
, UChar ws
) /* BIT (0x09) */
4566 operation
= (cins
& 0x03800000) >> 23;
4567 df
= (cins
& 0x007F0000) >> 16;
4569 if ((df
& 0x70) == 0x70) { // 111mmmm; b
4572 } else if ((df
& 0x60) == 0x60) { // 110mmmm; h
4575 } else if ((df
& 0x40) == 0x40) { // 10mmmmm; w
4578 } else if ((df
& 0x00) == 0x00) { // 0mmmmmm; d
4583 switch (operation
) {
4584 case 0x00: { /* SLLI.df */
4586 case 0x00: { /* SLLI.B */
4587 DIP("SLLI.B w%d, w%d, %d", wd
, ws
, m
);
4588 putWReg(wd
, binop(Iop_ShlN8x16
, getWReg(ws
), mkU8(m
)));
4592 case 0x01: { /* SLLI.H */
4593 DIP("SLLI.H w%d, w%d, %d", wd
, ws
, m
);
4594 putWReg(wd
, binop(Iop_ShlN16x8
, getWReg(ws
), mkU8(m
)));
4598 case 0x02: { /* SLLI.W */
4599 DIP("SLLI.W w%d, w%d, %d", wd
, ws
, m
);
4600 putWReg(wd
, binop(Iop_ShlN32x4
, getWReg(ws
), mkU8(m
)));
4604 case 0x03: { /* SLLI.D */
4605 DIP("SLLI.D w%d, w%d, %d", wd
, ws
, m
);
4606 putWReg(wd
, binop(Iop_ShlN64x2
, getWReg(ws
), mkU8(m
)));
4614 case 0x01: { /* SRAI.df */
4616 case 0x00: { /* SRAI.B */
4617 DIP("SRAI.B w%d, w%d, %d", wd
, ws
, m
);
4618 putWReg(wd
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(m
)));
4622 case 0x01: { /* SRAI.H */
4623 DIP("SRAI.H w%d, w%d, %d", wd
, ws
, m
);
4624 putWReg(wd
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(m
)));
4628 case 0x02: { /* SRAI.W */
4629 DIP("SRAI.W w%d, w%d, %d", wd
, ws
, m
);
4630 putWReg(wd
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(m
)));
4634 case 0x03: { /* SRAI.D */
4635 DIP("SRAI.D w%d, w%d, %d", wd
, ws
, m
);
4636 putWReg(wd
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(m
)));
4644 case 0x02: { /* SRLI.df */
4646 case 0x00: { /* SRLI.B */
4647 DIP("SRLI.B w%d, w%d, %d", wd
, ws
, m
);
4648 putWReg(wd
, binop(Iop_ShrN8x16
, getWReg(ws
), mkU8(m
)));
4652 case 0x01: { /* SRLI.H */
4653 DIP("SRLI.H w%d, w%d, %d", wd
, ws
, m
);
4654 putWReg(wd
, binop(Iop_ShrN16x8
, getWReg(ws
), mkU8(m
)));
4658 case 0x02: { /* SRLI.W */
4659 DIP("SRLI.W w%d, w%d, %d", wd
, ws
, m
);
4660 putWReg(wd
, binop(Iop_ShrN32x4
, getWReg(ws
), mkU8(m
)));
4664 case 0x03: { /* SRLI.D */
4665 DIP("SRLI.D w%d, w%d, %d", wd
, ws
, m
);
4666 putWReg(wd
, binop(Iop_ShrN64x2
, getWReg(ws
), mkU8(m
)));
4674 case 0x03: { /* BCLRI.df */
4675 t1
= newTemp(Ity_V128
);
4676 t2
= newTemp(Ity_V128
);
4677 t3
= newTemp(Ity_V128
);
4679 assign(t1
, getWReg(ws
));
4682 case 0x00: { /* BCLRI.B */
4683 DIP("BCLRI.B w%d, w%d, %d", wd
, ws
, m
);
4684 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4685 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4687 assign(t2
, binop(Iop_ShlN8x16
,
4688 binop(Iop_64HLtoV128
,
4689 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4693 case 0x01: { /* BCLRI.H */
4694 DIP("BCLRI.H w%d, w%d, %d", wd
, ws
, m
);
4695 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4696 assign(t2
, binop(Iop_ShlN16x8
,
4697 binop(Iop_64HLtoV128
,
4698 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4702 case 0x02: { /* BCLRI.W */
4703 DIP("BCLRI.W w%d, w%d, %d", wd
, ws
, m
);
4705 assign(t2
, binop(Iop_ShlN32x4
,
4706 binop(Iop_64HLtoV128
,
4707 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4711 case 0x03: { /* BCLRI.D */
4712 DIP("BCLRI.D w%d, w%d, %d", wd
, ws
, m
);
4713 assign(t2
, binop(Iop_ShlN64x2
,
4714 binop(Iop_64HLtoV128
,
4715 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4720 assign(t3
, binop(Iop_AndV128
,
4721 mkexpr(t1
), unop(Iop_NotV128
, mkexpr(t2
))));
4722 putWReg(wd
, mkexpr(t3
));
4726 case 0x04: { /* BSETI */
4727 t1
= newTemp(Ity_V128
);
4728 t2
= newTemp(Ity_V128
);
4729 t3
= newTemp(Ity_V128
);
4731 assign(t1
, getWReg(ws
));
4734 case 0x00: { /* BSETI.B */
4735 DIP("BSETI.B w%d, w%d, %d", wd
, ws
, m
);
4736 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4737 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4739 assign(t2
, binop(Iop_ShlN8x16
,
4740 binop(Iop_64HLtoV128
,
4741 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4745 case 0x01: { /* BSETI.H */
4746 DIP("BSETI.H w%d, w%d, %d", wd
, ws
, m
);
4747 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4748 assign(t2
, binop(Iop_ShlN16x8
,
4749 binop(Iop_64HLtoV128
,
4750 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4754 case 0x02: { /* BSETI.W */
4755 DIP("BSETI.W w%d, w%d, %d", wd
, ws
, m
);
4757 assign(t2
, binop(Iop_ShlN32x4
,
4758 binop(Iop_64HLtoV128
,
4759 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4763 case 0x03: { /* BSETI.D */
4764 DIP("BSETI.D w%d, w%d, %d", wd
, ws
, m
);
4765 assign(t2
, binop(Iop_ShlN64x2
,
4766 binop(Iop_64HLtoV128
,
4767 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4772 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
4773 putWReg(wd
, mkexpr(t3
));
4777 case 0x05: { /* BNEGI.df */
4778 t1
= newTemp(Ity_V128
);
4779 t2
= newTemp(Ity_V128
);
4780 t3
= newTemp(Ity_V128
);
4782 assign(t1
, getWReg(ws
));
4785 case 0x00: { /* BNEGI.B */
4786 DIP("BNEGI.B w%d, w%d, %d", wd
, ws
, m
);
4787 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
4788 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
4790 assign(t2
, binop(Iop_ShlN8x16
,
4791 binop(Iop_64HLtoV128
,
4792 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4796 case 0x01: { /* BNEGI.H */
4797 DIP("BNEGI.H w%d, w%d, %d", wd
, ws
, m
);
4798 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
4799 assign(t2
, binop(Iop_ShlN16x8
,
4800 binop(Iop_64HLtoV128
,
4801 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4805 case 0x02: { /* BNEGI.W */
4806 DIP("BNEGI.W w%d, w%d, %d", wd
, ws
, m
);
4808 assign(t2
, binop(Iop_ShlN32x4
,
4809 binop(Iop_64HLtoV128
,
4810 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4814 case 0x03: { /* BNEGI.D */
4815 DIP("BNEGI.D w%d, w%d, %d", wd
, ws
, m
);
4816 assign(t2
, binop(Iop_ShlN64x2
,
4817 binop(Iop_64HLtoV128
,
4818 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4823 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
4824 putWReg(wd
, mkexpr(t3
));
4828 case 0x06: { /* BINSLI.df */
4830 case 0x00: { /* BINSLI.B */
4831 DIP("BINSLI.B w%d, w%d, w%d", wd
, ws
, m
);
4832 t1
= newTemp(Ity_V128
);
4833 t2
= newTemp(Ity_V128
);
4834 t3
= newTemp(Ity_V128
);
4835 ULong tmp
= 0x8080808080808080ULL
;
4836 assign(t1
, binop(Iop_SarN8x16
,
4837 binop(Iop_64HLtoV128
,
4838 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4841 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4844 mkexpr(t1
), getWReg(ws
)));
4847 mkexpr(t2
), mkexpr(t3
)));
4851 case 0x01: { /* BINSLI.H */
4852 DIP("BINSLI.H w%d, w%d, w%d", wd
, ws
, m
);
4853 t1
= newTemp(Ity_V128
);
4854 t2
= newTemp(Ity_V128
);
4855 t3
= newTemp(Ity_V128
);
4856 ULong tmp
= 0x8000800080008000ULL
;
4859 binop(Iop_64HLtoV128
,
4860 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4863 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4866 mkexpr(t1
), getWReg(ws
)));
4869 mkexpr(t2
), mkexpr(t3
)));
4873 case 0x02: { /* BINSLI.W */
4874 DIP("BINSLI.W w%d, w%d, w%d", wd
, ws
, m
);
4875 t1
= newTemp(Ity_V128
);
4876 t2
= newTemp(Ity_V128
);
4877 t3
= newTemp(Ity_V128
);
4878 ULong tmp
= 0x8000000080000000ULL
;
4881 binop(Iop_64HLtoV128
,
4882 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4885 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4888 mkexpr(t1
), getWReg(ws
)));
4891 mkexpr(t2
), mkexpr(t3
)));
4895 case 0x03: { /* BINSLI.D */
4896 DIP("BINSLI.D w%d, w%d, w%d", wd
, ws
, m
);
4897 t1
= newTemp(Ity_V128
);
4898 t2
= newTemp(Ity_V128
);
4899 t3
= newTemp(Ity_V128
);
4900 ULong tmp
= 0x8000000000000000ULL
;
4903 binop(Iop_64HLtoV128
,
4904 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4907 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(wd
)));
4910 mkexpr(t1
), getWReg(ws
)));
4913 mkexpr(t2
), mkexpr(t3
)));
4926 case 0x00: { /* BINSRI.B */
4927 DIP("BINSRI.B w%d, w%d, w%d", wd
, ws
, m
);
4928 t1
= newTemp(Ity_V128
);
4929 t2
= newTemp(Ity_V128
);
4930 t3
= newTemp(Ity_V128
);
4931 ULong tmp
= 0xFEFEFEFEFEFEFEFEULL
;
4934 binop(Iop_64HLtoV128
,
4935 mkU64(tmp
), mkU64(tmp
)), mkU8(m
)));
4938 unop(Iop_NotV128
, mkexpr(t1
)), getWReg(ws
)));
4941 mkexpr(t1
), getWReg(wd
)));
4944 mkexpr(t2
), mkexpr(t3
)));
4948 case 0x01: { /* BINSRI.H */
4949 DIP("BINSRI.H w%d, w%d, w%d", wd
, ws
, m
);
4950 t1
= newTemp(Ity_V128
);
4951 t2
= newTemp(Ity_V128
);
4952 t3
= newTemp(Ity_V128
);
4953 ULong tmp
= 0xFFFEFFFEFFFEFFFEULL
;
4956 binop(Iop_64HLtoV128
,
4957 mkU64(tmp
), mkU64(tmp
)),
4961 unop(Iop_NotV128
, mkexpr(t1
)),
4965 mkexpr(t1
), getWReg(wd
)));
4968 mkexpr(t2
), mkexpr(t3
)));
4972 case 0x02: { /* BINSRI.W */
4973 DIP("BINSRI.W w%d, w%d, w%d", wd
, ws
, m
);
4974 t1
= newTemp(Ity_V128
);
4975 t2
= newTemp(Ity_V128
);
4976 t3
= newTemp(Ity_V128
);
4977 ULong tmp
= 0xFFFFFFFEFFFFFFFEULL
;
4980 binop(Iop_64HLtoV128
,
4981 mkU64(tmp
), mkU64(tmp
)),
4985 unop(Iop_NotV128
, mkexpr(t1
)),
4989 mkexpr(t1
), getWReg(wd
)));
4992 mkexpr(t2
), mkexpr(t3
)));
4996 case 0x03: { /* BINSRI.D */
4997 DIP("BINSRI.D w%d, w%d, w%d", wd
, ws
, m
);
4998 t1
= newTemp(Ity_V128
);
4999 t2
= newTemp(Ity_V128
);
5000 t3
= newTemp(Ity_V128
);
5004 binop(Iop_64HLtoV128
,
5005 mkU64(tmp
), mkU64(tmp
)),
5009 unop(Iop_NotV128
, mkexpr(t1
)),
5013 mkexpr(t1
), getWReg(wd
)));
5016 mkexpr(t2
), mkexpr(t3
)));
5034 static Int
msa_BIT_0A(UInt cins
, UChar wd
, UChar ws
) /* BIT (0x0A) */
5040 operation
= (cins
& 0x03800000) >> 23;
5041 df
= (cins
& 0x007F0000) >> 16;
5043 if ((df
& 0x70) == 0x70) { // 111mmmm; b
5046 } else if ((df
& 0x60) == 0x60) { // 110mmmm; h
5049 } else if ((df
& 0x40) == 0x40) { // 10mmmmm; w
5052 } else if ((df
& 0x00) == 0x00) { // 0mmmmmm; d
5057 switch (operation
) {
5058 case 0x00: { /* SAT_S.df */
5060 case 0x00: { /* SAT_S.B */
5061 DIP("SAT_S.B w%d, w%d, %d", wd
, ws
, m
);
5062 t1
= newTemp(Ity_V128
);
5063 assign(t1
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(7)));
5066 putWReg(wd
, mkexpr(t1
));
5068 t2
= newTemp(Ity_V128
);
5070 binop(Iop_SarN8x16
, getWReg(ws
), mkU8(m
)));
5075 binop(Iop_CmpEQ8x16
,
5080 binop(Iop_CmpGT8Sx16
,
5085 binop(Iop_CmpGT8Sx16
,
5094 case 0x01: { /* SAT_S.H */
5095 DIP("SAT_S.H w%d, w%d, %d", wd
, ws
, m
);
5096 t1
= newTemp(Ity_V128
);
5097 assign(t1
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(15)));
5100 putWReg(wd
, mkexpr(t1
));
5102 t2
= newTemp(Ity_V128
);
5111 binop(Iop_CmpEQ16x8
,
5116 binop(Iop_CmpGT16Sx8
,
5121 binop(Iop_CmpGT16Sx8
,
5130 case 0x02: { /* SAT_S.W */
5131 DIP("SAT_S.W w%d, w%d, %d", wd
, ws
, m
);
5132 t1
= newTemp(Ity_V128
);
5133 assign(t1
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(31)));
5136 putWReg(wd
, mkexpr(t1
));
5138 t2
= newTemp(Ity_V128
);
5147 binop(Iop_CmpEQ32x4
,
5152 binop(Iop_CmpGT32Sx4
,
5157 binop(Iop_CmpGT32Sx4
,
5166 case 0x03: { /* SAT_S.D */
5167 DIP("SAT_S.D w%d, w%d, %d", wd
, ws
, m
);
5168 t1
= newTemp(Ity_V128
);
5169 assign(t1
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(63)));
5172 putWReg(wd
, mkexpr(t1
));
5174 t2
= newTemp(Ity_V128
);
5183 binop(Iop_CmpEQ64x2
,
5188 binop(Iop_CmpGT64Sx2
,
5193 binop(Iop_CmpGT64Sx2
,
5206 case 0x01: { /* SAT_U.df */
5208 case 0x00: { /* SAT_U.B */
5209 DIP("SAT_U.B w%d, w%d, %d", wd
, ws
, m
);
5212 putWReg(wd
, getWReg(ws
));
5214 t1
= newTemp(Ity_V128
);
5216 binop(Iop_CmpEQ8x16
,
5220 binop(Iop_64HLtoV128
,
5221 mkU64(0), mkU64(0))));
5236 case 0x01: { /* SAT_U.H */
5237 DIP("SAT_U.H w%d, w%d, %d", wd
, ws
, m
);
5240 putWReg(wd
, getWReg(ws
));
5242 t1
= newTemp(Ity_V128
);
5244 binop(Iop_CmpEQ16x8
,
5248 binop(Iop_64HLtoV128
,
5249 mkU64(0), mkU64(0))));
5264 case 0x02: { /* SAT_U.W */
5265 DIP("SAT_U.W w%d, w%d, %d", wd
, ws
, m
);
5268 putWReg(wd
, getWReg(ws
));
5270 t1
= newTemp(Ity_V128
);
5272 binop(Iop_CmpEQ32x4
,
5276 binop(Iop_64HLtoV128
,
5277 mkU64(0), mkU64(0))));
5292 case 0x03: { /* SAT_U.D */
5293 DIP("SAT_U.D w%d, w%d, %d", wd
, ws
, m
);
5296 putWReg(wd
, getWReg(ws
));
5298 t1
= newTemp(Ity_V128
);
5300 binop(Iop_CmpEQ64x2
,
5304 binop(Iop_64HLtoV128
,
5305 mkU64(0), mkU64(0))));
5324 case 0x02: { /* SRARI.df */
5326 case 0x00: { /* SRARI.B */
5327 DIP("SRARI.B w%d, w%d, %d", wd
, ws
, m
);
5328 t1
= newTemp(Ity_V128
);
5329 t2
= newTemp(Ity_V128
);
5341 if (m
) putWReg(wd
, binop(Iop_Add8x16
,
5344 else putWReg(wd
, mkexpr(t1
));
5349 case 0x01: { /* SRARI.H */
5350 DIP("SRARI.H w%d, w%d, %d", wd
, ws
, m
);
5351 t1
= newTemp(Ity_V128
);
5352 t2
= newTemp(Ity_V128
);
5367 mkexpr(t1
), mkexpr(t2
)));
5368 else putWReg(wd
, mkexpr(t1
));
5373 case 0x02: { /* SRARI.W */
5374 DIP("SRARI.W w%d, w%d, %d", wd
, ws
, m
);
5375 t1
= newTemp(Ity_V128
);
5376 t2
= newTemp(Ity_V128
);
5391 mkexpr(t1
), mkexpr(t2
)));
5392 else putWReg(wd
, mkexpr(t1
));
5397 case 0x03: { /* SRARI.D */
5398 DIP("SRARI.D w%d, w%d, %d", wd
, ws
, m
);
5399 t1
= newTemp(Ity_V128
);
5400 t2
= newTemp(Ity_V128
);
5415 mkexpr(t1
), mkexpr(t2
)));
5416 else putWReg(wd
, mkexpr(t1
));
5425 case 0x03: { /* SRLRI.df */
5427 case 0x00: { /* SRLRI.B */
5428 DIP("SRLRI.B w%d, w%d, %d", wd
, ws
, m
);
5429 t1
= newTemp(Ity_V128
);
5430 t2
= newTemp(Ity_V128
);
5445 mkexpr(t1
), mkexpr(t2
)));
5446 else putWReg(wd
, mkexpr(t1
));
5451 case 0x01: { /* SRLRI.H */
5452 DIP("SRLRI.H w%d, w%d, %d", wd
, ws
, m
);
5453 t1
= newTemp(Ity_V128
);
5454 t2
= newTemp(Ity_V128
);
5469 mkexpr(t1
), mkexpr(t2
)));
5470 else putWReg(wd
, mkexpr(t1
));
5475 case 0x02: { /* SRLRI.W */
5476 DIP("SRLRI.W w%d, w%d, %d", wd
, ws
, m
);
5477 t1
= newTemp(Ity_V128
);
5478 t2
= newTemp(Ity_V128
);
5493 mkexpr(t1
), mkexpr(t2
)));
5494 else putWReg(wd
, mkexpr(t1
));
5499 case 0x03: { /* SRLRI.D */
5500 DIP("SRLRI.D w%d, w%d, %d", wd
, ws
, m
);
5501 t1
= newTemp(Ity_V128
);
5502 t2
= newTemp(Ity_V128
);
5517 mkexpr(t1
), mkexpr(t2
)));
5518 else putWReg(wd
, mkexpr(t1
));
5534 static Int
msa_3R_0D(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0D) */
5540 operation
= (cins
& 0x03800000) >> 23;
5541 df
= (cins
& 0x00600000) >> 21;
5542 wt
= (cins
& 0x001F0000) >> 16;
5544 switch (operation
) {
5545 case 0x00: { /* SLL.df */
5547 case 0x00: { /* SLL.B */
5548 DIP("SLL.B w%d, w%d, w%d", wd
, ws
, wt
);
5549 t1
= newTemp(Ity_V128
);
5550 t2
= newTemp(Ity_V128
);
5551 t3
= newTemp(Ity_V128
);
5552 assign(t1
, getWReg(ws
));
5553 assign(t2
, getWReg(wt
));
5554 assign(t3
, binop(Iop_Shl8x16
, mkexpr(t1
), mkexpr(t2
)));
5555 putWReg(wd
, mkexpr(t3
));
5559 case 0x01: { /* SLL.H */
5560 DIP("SLL.H w%d, w%d, w%d", wd
, ws
, wt
);
5561 t1
= newTemp(Ity_V128
);
5562 t2
= newTemp(Ity_V128
);
5563 t3
= newTemp(Ity_V128
);
5564 assign(t1
, getWReg(ws
));
5565 assign(t2
, getWReg(wt
));
5566 assign(t3
, binop(Iop_Shl16x8
, mkexpr(t1
), mkexpr(t2
)));
5567 putWReg(wd
, mkexpr(t3
));
5571 case 0x02: { /* SLL.W */
5572 DIP("SLL.W w%d, w%d, w%d", wd
, ws
, wt
);
5573 t1
= newTemp(Ity_V128
);
5574 t2
= newTemp(Ity_V128
);
5575 t3
= newTemp(Ity_V128
);
5576 assign(t1
, getWReg(ws
));
5577 assign(t2
, getWReg(wt
));
5578 assign(t3
, binop(Iop_Shl32x4
, mkexpr(t1
), mkexpr(t2
)));
5579 putWReg(wd
, mkexpr(t3
));
5583 case 0x03: { /* SLL.D */
5584 DIP("SLL.D w%d, w%d, w%d", wd
, ws
, wt
);
5585 t1
= newTemp(Ity_V128
);
5586 t2
= newTemp(Ity_V128
);
5587 t3
= newTemp(Ity_V128
);
5588 assign(t1
, getWReg(ws
));
5589 assign(t2
, getWReg(wt
));
5590 assign(t3
, binop(Iop_Shl64x2
, mkexpr(t1
), mkexpr(t2
)));
5591 putWReg(wd
, mkexpr(t3
));
5602 case 0x01: { /* SRA.df */
5604 case 0x00: { /* SRA.B */
5605 DIP("SRA.B w%d, w%d, w%d", wd
, ws
, wt
);
5606 t1
= newTemp(Ity_V128
);
5607 t2
= newTemp(Ity_V128
);
5608 t3
= newTemp(Ity_V128
);
5609 assign(t1
, getWReg(ws
));
5610 assign(t2
, getWReg(wt
));
5611 assign(t3
, binop(Iop_Sar8x16
, mkexpr(t1
), mkexpr(t2
)));
5612 putWReg(wd
, mkexpr(t3
));
5616 case 0x01: { /* SRA.H */
5617 DIP("SRA.H w%d, w%d, w%d", wd
, ws
, wt
);
5618 t1
= newTemp(Ity_V128
);
5619 t2
= newTemp(Ity_V128
);
5620 t3
= newTemp(Ity_V128
);
5621 assign(t1
, getWReg(ws
));
5622 assign(t2
, getWReg(wt
));
5623 assign(t3
, binop(Iop_Sar16x8
, mkexpr(t1
), mkexpr(t2
)));
5624 putWReg(wd
, mkexpr(t3
));
5628 case 0x02: { /* SRA.W */
5629 DIP("SRA.W w%d, w%d, w%d", wd
, ws
, wt
);
5630 t1
= newTemp(Ity_V128
);
5631 t2
= newTemp(Ity_V128
);
5632 t3
= newTemp(Ity_V128
);
5633 assign(t1
, getWReg(ws
));
5634 assign(t2
, getWReg(wt
));
5635 assign(t3
, binop(Iop_Sar32x4
, mkexpr(t1
), mkexpr(t2
)));
5636 putWReg(wd
, mkexpr(t3
));
5640 case 0x03: { /* SRA.D */
5641 DIP("SRA.D w%d, w%d, w%d", wd
, ws
, wt
);
5642 t1
= newTemp(Ity_V128
);
5643 t2
= newTemp(Ity_V128
);
5644 t3
= newTemp(Ity_V128
);
5645 assign(t1
, getWReg(ws
));
5646 assign(t2
, getWReg(wt
));
5647 assign(t3
, binop(Iop_Sar64x2
, mkexpr(t1
), mkexpr(t2
)));
5648 putWReg(wd
, mkexpr(t3
));
5659 case 0x02: { /* SRL.df */
5661 case 0x00: { /* SRL.B */
5662 DIP("SRL.B w%d, w%d, w%d", wd
, ws
, wt
);
5663 t1
= newTemp(Ity_V128
);
5664 t2
= newTemp(Ity_V128
);
5665 t3
= newTemp(Ity_V128
);
5666 assign(t1
, getWReg(ws
));
5667 assign(t2
, getWReg(wt
));
5668 assign(t3
, binop(Iop_Shr8x16
, mkexpr(t1
), mkexpr(t2
)));
5669 putWReg(wd
, mkexpr(t3
));
5673 case 0x01: { /* SRL.H */
5674 DIP("SRL.H w%d, w%d, w%d", wd
, ws
, wt
);
5675 t1
= newTemp(Ity_V128
);
5676 t2
= newTemp(Ity_V128
);
5677 t3
= newTemp(Ity_V128
);
5678 assign(t1
, getWReg(ws
));
5679 assign(t2
, getWReg(wt
));
5680 assign(t3
, binop(Iop_Shr16x8
, mkexpr(t1
), mkexpr(t2
)));
5681 putWReg(wd
, mkexpr(t3
));
5685 case 0x02: { /* SRL.W */
5686 DIP("SRL.W w%d, w%d, w%d", wd
, ws
, wt
);
5687 t1
= newTemp(Ity_V128
);
5688 t2
= newTemp(Ity_V128
);
5689 t3
= newTemp(Ity_V128
);
5690 assign(t1
, getWReg(ws
));
5691 assign(t2
, getWReg(wt
));
5692 assign(t3
, binop(Iop_Shr32x4
, mkexpr(t1
), mkexpr(t2
)));
5693 putWReg(wd
, mkexpr(t3
));
5697 case 0x03: { /* SRL.D */
5698 DIP("SRL.D w%d, w%d, w%d", wd
, ws
, wt
);
5699 t1
= newTemp(Ity_V128
);
5700 t2
= newTemp(Ity_V128
);
5701 t3
= newTemp(Ity_V128
);
5702 assign(t1
, getWReg(ws
));
5703 assign(t2
, getWReg(wt
));
5704 assign(t3
, binop(Iop_Shr64x2
, mkexpr(t1
), mkexpr(t2
)));
5705 putWReg(wd
, mkexpr(t3
));
5716 case 0x03: { /* BCLR.df */
5717 t1
= newTemp(Ity_V128
);
5718 t2
= newTemp(Ity_V128
);
5719 t3
= newTemp(Ity_V128
);
5721 assign(t1
, getWReg(ws
));
5724 case 0x00: { /* BCLR.B */
5725 DIP("BCLR.B w%d, w%d, w%d", wd
, ws
, wt
);
5726 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5727 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5729 assign(t2
, binop(Iop_Shl8x16
,
5730 binop(Iop_64HLtoV128
,
5731 mkU64(tmp
), mkU64(tmp
)),
5736 case 0x01: { /* BCLR.H */
5737 DIP("BCLR.H w%d, w%d, w%d", wd
, ws
, wt
);
5738 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5741 binop(Iop_64HLtoV128
,
5742 mkU64(tmp
), mkU64(tmp
)),
5747 case 0x02: { /* BCLR.W */
5748 DIP("BCLR.W w%d, w%d, w%d", wd
, ws
, wt
);
5752 binop(Iop_64HLtoV128
,
5753 mkU64(tmp
), mkU64(tmp
)),
5758 case 0x03: { /* BCLR.D */
5759 DIP("BCLR.D w%d, w%d, w%d", wd
, ws
, wt
);
5762 binop(Iop_64HLtoV128
,
5763 mkU64(tmp
), mkU64(tmp
)),
5771 mkexpr(t1
), unop(Iop_NotV128
, mkexpr(t2
))));
5772 putWReg(wd
, mkexpr(t3
));
5776 case 0x04: { /* BSET.df */
5777 t1
= newTemp(Ity_V128
);
5778 t2
= newTemp(Ity_V128
);
5779 t3
= newTemp(Ity_V128
);
5781 assign(t1
, getWReg(ws
));
5784 case 0x00: { /* BSET.B */
5785 DIP("BSET.B w%d, w%d, w%d", wd
, ws
, wt
);
5786 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5787 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5791 binop(Iop_64HLtoV128
,
5792 mkU64(tmp
), mkU64(tmp
)),
5797 case 0x01: { /* BSET.H */
5798 DIP("BSET.H w%d, w%d, w%d", wd
, ws
, wt
);
5799 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5802 binop(Iop_64HLtoV128
,
5803 mkU64(tmp
), mkU64(tmp
)),
5808 case 0x02: { /* BSET.W */
5809 DIP("BSET.W w%d, w%d, w%d", wd
, ws
, wt
);
5813 binop(Iop_64HLtoV128
,
5814 mkU64(tmp
), mkU64(tmp
)),
5819 case 0x03: { /* BSET.D */
5820 DIP("BSET.D w%d, w%d, w%d", wd
, ws
, wt
);
5823 binop(Iop_64HLtoV128
,
5824 mkU64(tmp
), mkU64(tmp
)),
5830 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
5831 putWReg(wd
, mkexpr(t3
));
5835 case 0x05: { /* BNEG.df */
5836 t1
= newTemp(Ity_V128
);
5837 t2
= newTemp(Ity_V128
);
5838 t3
= newTemp(Ity_V128
);
5840 assign(t1
, getWReg(ws
));
5843 case 0x00: { /* BNEG.B */
5844 DIP("BNEG.B w%d, w%d, w%d", wd
, ws
, wt
);
5845 tmp
|= (tmp
<< 56) | (tmp
<< 48) | (tmp
<< 40) |
5846 (tmp
<< 32) | (tmp
<< 24) | (tmp
<< 16) |
5850 binop(Iop_64HLtoV128
,
5851 mkU64(tmp
), mkU64(tmp
)),
5856 case 0x01: { /* BNEG.H */
5857 DIP("BNEG.H w%d, w%d, w%d", wd
, ws
, wt
);
5858 tmp
|= (tmp
<< 48) | (tmp
<< 32) | (tmp
<< 16);
5861 binop(Iop_64HLtoV128
,
5862 mkU64(tmp
), mkU64(tmp
)),
5867 case 0x02: { /* BNEG.W */
5868 DIP("BNEG.W w%d, w%d, w%d", wd
, ws
, wt
);
5872 binop(Iop_64HLtoV128
,
5873 mkU64(tmp
), mkU64(tmp
)),
5878 case 0x03: { /* BNEG.D */
5879 DIP("BNEG.D w%d, w%d, w%d", wd
, ws
, wt
);
5882 binop(Iop_64HLtoV128
,
5883 mkU64(tmp
), mkU64(tmp
)),
5889 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
5890 putWReg(wd
, mkexpr(t3
));
5894 case 0x06: { /* BINSL.df */
5896 case 0x00: { /* BINSL.B */
5897 DIP("BINSL.B w%d, w%d, w%d", wd
, ws
, wt
);
5898 t1
= newTemp(Ity_V128
);
5899 t2
= newTemp(Ity_V128
);
5900 t3
= newTemp(Ity_V128
);
5901 ULong tmp
= 0x8080808080808080ULL
;
5904 binop(Iop_64HLtoV128
,
5905 mkU64(tmp
), mkU64(tmp
)),
5909 unop(Iop_NotV128
, mkexpr(t1
)),
5913 mkexpr(t1
), getWReg(ws
)));
5916 mkexpr(t2
), mkexpr(t3
)));
5920 case 0x01: { /* BINSL.H */
5921 DIP("BINSL.H w%d, w%d, w%d", wd
, ws
, wt
);
5922 t1
= newTemp(Ity_V128
);
5923 t2
= newTemp(Ity_V128
);
5924 t3
= newTemp(Ity_V128
);
5925 ULong tmp
= 0x8000800080008000ULL
;
5928 binop(Iop_64HLtoV128
,
5929 mkU64(tmp
), mkU64(tmp
)),
5933 unop(Iop_NotV128
, mkexpr(t1
)),
5937 mkexpr(t1
), getWReg(ws
)));
5940 mkexpr(t2
), mkexpr(t3
)));
5944 case 0x02: { /* BINSL.W */
5945 DIP("BINSL.W w%d, w%d, w%d", wd
, ws
, wt
);
5946 t1
= newTemp(Ity_V128
);
5947 t2
= newTemp(Ity_V128
);
5948 t3
= newTemp(Ity_V128
);
5949 ULong tmp
= 0x8000000080000000ULL
;
5952 binop(Iop_64HLtoV128
,
5953 mkU64(tmp
), mkU64(tmp
)),
5957 unop(Iop_NotV128
, mkexpr(t1
)),
5961 mkexpr(t1
), getWReg(ws
)));
5964 mkexpr(t2
), mkexpr(t3
)));
5968 case 0x03: { /* BINSL.D */
5969 DIP("BINSL.D w%d, w%d, w%d", wd
, ws
, wt
);
5970 t1
= newTemp(Ity_V128
);
5971 t2
= newTemp(Ity_V128
);
5972 t3
= newTemp(Ity_V128
);
5973 ULong tmp
= 0x8000000000000000ULL
;
5976 binop(Iop_64HLtoV128
,
5977 mkU64(tmp
), mkU64(tmp
)),
5981 unop(Iop_NotV128
, mkexpr(t1
)),
5985 mkexpr(t1
), getWReg(ws
)));
5988 mkexpr(t2
), mkexpr(t3
)));
5999 case 0x07: { /* BINSR.df */
6001 case 0x00: { /* BINSR.B */
6002 DIP("BINSR.B w%d, w%d, w%d", wd
, ws
, wt
);
6003 t1
= newTemp(Ity_V128
);
6004 t2
= newTemp(Ity_V128
);
6005 t3
= newTemp(Ity_V128
);
6006 ULong tmp
= 0xFEFEFEFEFEFEFEFEULL
;
6009 binop(Iop_64HLtoV128
,
6010 mkU64(tmp
), mkU64(tmp
)),
6014 unop(Iop_NotV128
, mkexpr(t1
)),
6018 mkexpr(t1
), getWReg(wd
)));
6021 mkexpr(t2
), mkexpr(t3
)));
6025 case 0x01: { /* BINSR.H */
6026 DIP("BINSR.H w%d, w%d, w%d", wd
, ws
, wt
);
6027 t1
= newTemp(Ity_V128
);
6028 t2
= newTemp(Ity_V128
);
6029 t3
= newTemp(Ity_V128
);
6030 ULong tmp
= 0xFFFEFFFEFFFEFFFEULL
;
6033 binop(Iop_64HLtoV128
,
6034 mkU64(tmp
), mkU64(tmp
)),
6038 unop(Iop_NotV128
, mkexpr(t1
)),
6042 mkexpr(t1
), getWReg(wd
)));
6045 mkexpr(t2
), mkexpr(t3
)));
6049 case 0x02: { /* BINSR.W */
6050 DIP("BINSR.W w%d, w%d, w%d", wd
, ws
, wt
);
6051 t1
= newTemp(Ity_V128
);
6052 t2
= newTemp(Ity_V128
);
6053 t3
= newTemp(Ity_V128
);
6054 ULong tmp
= 0xFFFFFFFEFFFFFFFEULL
;
6057 binop(Iop_64HLtoV128
,
6058 mkU64(tmp
), mkU64(tmp
)),
6062 unop(Iop_NotV128
, mkexpr(t1
)),
6066 mkexpr(t1
), getWReg(wd
)));
6069 mkexpr(t2
), mkexpr(t3
)));
6073 case 0x03: { /* BINSR.D */
6074 DIP("BINSR.D w%d, w%d, w%d", wd
, ws
, wt
);
6075 t1
= newTemp(Ity_V128
);
6076 t2
= newTemp(Ity_V128
);
6077 t3
= newTemp(Ity_V128
);
6081 binop(Iop_64HLtoV128
,
6082 mkU64(tmp
), mkU64(tmp
)),
6086 unop(Iop_NotV128
, mkexpr(t1
)),
6090 mkexpr(t1
), getWReg(wd
)));
6093 mkexpr(t2
), mkexpr(t3
)));
6111 static Int
msa_3R_0E(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0E) */
6113 IRTemp t1
, t2
, t3
, t4
;
6117 operation
= (cins
& 0x03800000) >> 23;
6118 df
= (cins
& 0x00600000) >> 21;
6119 wt
= (cins
& 0x001F0000) >> 16;
6121 switch (operation
) {
6122 case 0x00: { /* ADDV.df */
6124 case 0x00: { /* ADDV.B */
6125 DIP("ADDV.B w%d, w%d, w%d", wd
, ws
, wt
);
6126 t1
= newTemp(Ity_V128
);
6127 t2
= newTemp(Ity_V128
);
6128 t3
= newTemp(Ity_V128
);
6129 assign(t1
, getWReg(ws
));
6130 assign(t2
, getWReg(wt
));
6131 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
6132 putWReg(wd
, mkexpr(t3
));
6136 case 0x01: { /* ADDV.H */
6137 DIP("ADDV.H w%d, w%d, w%d", wd
, ws
, wt
);
6138 t1
= newTemp(Ity_V128
);
6139 t2
= newTemp(Ity_V128
);
6140 t3
= newTemp(Ity_V128
);
6141 assign(t1
, getWReg(ws
));
6142 assign(t2
, getWReg(wt
));
6143 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
6144 putWReg(wd
, mkexpr(t3
));
6148 case 0x02: { /* ADDV.W */
6149 DIP("ADDV.W w%d, w%d, w%d", wd
, ws
, wt
);
6150 t1
= newTemp(Ity_V128
);
6151 t2
= newTemp(Ity_V128
);
6152 t3
= newTemp(Ity_V128
);
6153 assign(t1
, getWReg(ws
));
6154 assign(t2
, getWReg(wt
));
6155 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
6156 putWReg(wd
, mkexpr(t3
));
6160 case 0x03: { /* ADDV.D */
6161 DIP("ADDV.D w%d, w%d, w%d", wd
, ws
, wt
);
6162 t1
= newTemp(Ity_V128
);
6163 t2
= newTemp(Ity_V128
);
6164 t3
= newTemp(Ity_V128
);
6165 assign(t1
, getWReg(ws
));
6166 assign(t2
, getWReg(wt
));
6167 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
6168 putWReg(wd
, mkexpr(t3
));
6179 case 0x01: { /* SUBV.df */
6181 case 0x00: { /* SUBV.B */
6182 DIP("SUBV.B w%d, w%d, w%d", wd
, ws
, wt
);
6183 t1
= newTemp(Ity_V128
);
6184 t2
= newTemp(Ity_V128
);
6185 t3
= newTemp(Ity_V128
);
6186 assign(t1
, getWReg(ws
));
6187 assign(t2
, getWReg(wt
));
6188 assign(t3
, binop(Iop_Sub8x16
, mkexpr(t1
), mkexpr(t2
)));
6189 putWReg(wd
, mkexpr(t3
));
6193 case 0x01: { /* SUBV.H */
6194 DIP("SUBV.H w%d, w%d, w%d", wd
, ws
, wt
);
6195 t1
= newTemp(Ity_V128
);
6196 t2
= newTemp(Ity_V128
);
6197 t3
= newTemp(Ity_V128
);
6198 assign(t1
, getWReg(ws
));
6199 assign(t2
, getWReg(wt
));
6200 assign(t3
, binop(Iop_Sub16x8
, mkexpr(t1
), mkexpr(t2
)));
6201 putWReg(wd
, mkexpr(t3
));
6205 case 0x02: { /* SUBV.W */
6206 DIP("SUBV.W w%d, w%d, w%d", wd
, ws
, wt
);
6207 t1
= newTemp(Ity_V128
);
6208 t2
= newTemp(Ity_V128
);
6209 t3
= newTemp(Ity_V128
);
6210 assign(t1
, getWReg(ws
));
6211 assign(t2
, getWReg(wt
));
6212 assign(t3
, binop(Iop_Sub32x4
, mkexpr(t1
), mkexpr(t2
)));
6213 putWReg(wd
, mkexpr(t3
));
6217 case 0x03: { /* SUBV.D */
6218 DIP("SUBV.D w%d, w%d, w%d", wd
, ws
, wt
);
6219 t1
= newTemp(Ity_V128
);
6220 t2
= newTemp(Ity_V128
);
6221 t3
= newTemp(Ity_V128
);
6222 assign(t1
, getWReg(ws
));
6223 assign(t2
, getWReg(wt
));
6224 assign(t3
, binop(Iop_Sub64x2
, mkexpr(t1
), mkexpr(t2
)));
6225 putWReg(wd
, mkexpr(t3
));
6236 case 0x02: { /* MAX_S.df */
6238 case 0x00: { /* MAX_S.B */
6239 DIP("MAX_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6240 t1
= newTemp(Ity_V128
);
6241 t2
= newTemp(Ity_V128
);
6242 t3
= newTemp(Ity_V128
);
6243 assign(t1
, getWReg(ws
));
6244 assign(t2
, getWReg(wt
));
6245 assign(t3
, binop(Iop_Max8Sx16
, mkexpr(t1
), mkexpr(t2
)));
6246 putWReg(wd
, mkexpr(t3
));
6250 case 0x01: { /* MAX_S.H */
6251 DIP("MAX_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6252 t1
= newTemp(Ity_V128
);
6253 t2
= newTemp(Ity_V128
);
6254 t3
= newTemp(Ity_V128
);
6255 assign(t1
, getWReg(ws
));
6256 assign(t2
, getWReg(wt
));
6257 assign(t3
, binop(Iop_Max16Sx8
, mkexpr(t1
), mkexpr(t2
)));
6258 putWReg(wd
, mkexpr(t3
));
6262 case 0x02: { /* MAX_S.W */
6263 DIP("MAX_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6264 t1
= newTemp(Ity_V128
);
6265 t2
= newTemp(Ity_V128
);
6266 t3
= newTemp(Ity_V128
);
6267 assign(t1
, getWReg(ws
));
6268 assign(t2
, getWReg(wt
));
6269 assign(t3
, binop(Iop_Max32Sx4
, mkexpr(t1
), mkexpr(t2
)));
6270 putWReg(wd
, mkexpr(t3
));
6274 case 0x03: { /* MAX_S.D */
6275 DIP("MAX_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6276 t1
= newTemp(Ity_V128
);
6277 t2
= newTemp(Ity_V128
);
6278 t3
= newTemp(Ity_V128
);
6279 assign(t1
, getWReg(ws
));
6280 assign(t2
, getWReg(wt
));
6281 assign(t3
, binop(Iop_Max64Sx2
, mkexpr(t1
), mkexpr(t2
)));
6282 putWReg(wd
, mkexpr(t3
));
6293 case 0x03: { /* MAX_U.df */
6295 case 0x00: { /* MAX_U.B */
6296 DIP("MAX_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6297 t1
= newTemp(Ity_V128
);
6298 t2
= newTemp(Ity_V128
);
6299 t3
= newTemp(Ity_V128
);
6300 assign(t1
, getWReg(ws
));
6301 assign(t2
, getWReg(wt
));
6302 assign(t3
, binop(Iop_Max8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6303 putWReg(wd
, mkexpr(t3
));
6307 case 0x01: { /* MAX_U.H */
6308 DIP("MAX_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6309 t1
= newTemp(Ity_V128
);
6310 t2
= newTemp(Ity_V128
);
6311 t3
= newTemp(Ity_V128
);
6312 assign(t1
, getWReg(ws
));
6313 assign(t2
, getWReg(wt
));
6314 assign(t3
, binop(Iop_Max16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6315 putWReg(wd
, mkexpr(t3
));
6319 case 0x02: { /* MAX_U.W */
6320 DIP("MAX_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6321 t1
= newTemp(Ity_V128
);
6322 t2
= newTemp(Ity_V128
);
6323 t3
= newTemp(Ity_V128
);
6324 assign(t1
, getWReg(ws
));
6325 assign(t2
, getWReg(wt
));
6326 assign(t3
, binop(Iop_Max32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6327 putWReg(wd
, mkexpr(t3
));
6331 case 0x03: { /* MAX_U.D */
6332 DIP("MAX_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6333 t1
= newTemp(Ity_V128
);
6334 t2
= newTemp(Ity_V128
);
6335 t3
= newTemp(Ity_V128
);
6336 assign(t1
, getWReg(ws
));
6337 assign(t2
, getWReg(wt
));
6338 assign(t3
, binop(Iop_Max64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6339 putWReg(wd
, mkexpr(t3
));
6350 case 0x04: { /* MIN_S.df */
6352 case 0x00: { /* MIN_S.B */
6353 DIP("MIN_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6354 t1
= newTemp(Ity_V128
);
6355 t2
= newTemp(Ity_V128
);
6356 t3
= newTemp(Ity_V128
);
6357 assign(t1
, getWReg(ws
));
6358 assign(t2
, getWReg(wt
));
6359 assign(t3
, binop(Iop_Min8Sx16
, mkexpr(t1
), mkexpr(t2
)));
6360 putWReg(wd
, mkexpr(t3
));
6364 case 0x01: { /* MIN_S.H */
6365 DIP("MIN_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6366 t1
= newTemp(Ity_V128
);
6367 t2
= newTemp(Ity_V128
);
6368 t3
= newTemp(Ity_V128
);
6369 assign(t1
, getWReg(ws
));
6370 assign(t2
, getWReg(wt
));
6371 assign(t3
, binop(Iop_Min16Sx8
, mkexpr(t1
), mkexpr(t2
)));
6372 putWReg(wd
, mkexpr(t3
));
6376 case 0x02: { /* MIN_S.W */
6377 DIP("MIN_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6378 t1
= newTemp(Ity_V128
);
6379 t2
= newTemp(Ity_V128
);
6380 t3
= newTemp(Ity_V128
);
6381 assign(t1
, getWReg(ws
));
6382 assign(t2
, getWReg(wt
));
6383 assign(t3
, binop(Iop_Min32Sx4
, mkexpr(t1
), mkexpr(t2
)));
6384 putWReg(wd
, mkexpr(t3
));
6388 case 0x03: { /* MIN_S.D */
6389 DIP("MIN_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6390 t1
= newTemp(Ity_V128
);
6391 t2
= newTemp(Ity_V128
);
6392 t3
= newTemp(Ity_V128
);
6393 assign(t1
, getWReg(ws
));
6394 assign(t2
, getWReg(wt
));
6395 assign(t3
, binop(Iop_Min64Sx2
, mkexpr(t1
), mkexpr(t2
)));
6396 putWReg(wd
, mkexpr(t3
));
6407 case 0x05: { /* MIN_U.df */
6409 case 0x00: { /* MIN_U.B */
6410 DIP("MIN_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6411 t1
= newTemp(Ity_V128
);
6412 t2
= newTemp(Ity_V128
);
6413 t3
= newTemp(Ity_V128
);
6414 assign(t1
, getWReg(ws
));
6415 assign(t2
, getWReg(wt
));
6416 assign(t3
, binop(Iop_Min8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6417 putWReg(wd
, mkexpr(t3
));
6421 case 0x01: { /* MIN_U.H */
6422 DIP("MIN_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6423 t1
= newTemp(Ity_V128
);
6424 t2
= newTemp(Ity_V128
);
6425 t3
= newTemp(Ity_V128
);
6426 assign(t1
, getWReg(ws
));
6427 assign(t2
, getWReg(wt
));
6428 assign(t3
, binop(Iop_Min16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6429 putWReg(wd
, mkexpr(t3
));
6433 case 0x02: { /* MIN_U.W */
6434 DIP("MIN_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6435 t1
= newTemp(Ity_V128
);
6436 t2
= newTemp(Ity_V128
);
6437 t3
= newTemp(Ity_V128
);
6438 assign(t1
, getWReg(ws
));
6439 assign(t2
, getWReg(wt
));
6440 assign(t3
, binop(Iop_Min32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6441 putWReg(wd
, mkexpr(t3
));
6445 case 0x03: { /* MIN_U.D */
6446 DIP("MIN_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6447 t1
= newTemp(Ity_V128
);
6448 t2
= newTemp(Ity_V128
);
6449 t3
= newTemp(Ity_V128
);
6450 assign(t1
, getWReg(ws
));
6451 assign(t2
, getWReg(wt
));
6452 assign(t3
, binop(Iop_Min64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6453 putWReg(wd
, mkexpr(t3
));
6464 case 0x06: { /* MAX_A.df */
6466 case 0x00: { /* MAX_A.B */
6467 DIP("MAX_A.B w%d, w%d, w%d", wd
, ws
, wt
);
6468 t1
= newTemp(Ity_V128
);
6469 t2
= newTemp(Ity_V128
);
6470 t3
= newTemp(Ity_V128
);
6471 t4
= newTemp(Ity_V128
);
6472 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
6473 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
6474 assign(t4
, binop(Iop_CmpGT8Ux16
, mkexpr(t1
), mkexpr(t2
)));
6475 assign(t3
, binop(Iop_OrV128
,
6480 unop(Iop_NotV128
, mkexpr(t4
)),
6482 putWReg(wd
, mkexpr(t3
));
6486 case 0x01: { /* MAX_A.H */
6487 DIP("MAX_A.H w%d, w%d, w%d", wd
, ws
, wt
);
6488 t1
= newTemp(Ity_V128
);
6489 t2
= newTemp(Ity_V128
);
6490 t3
= newTemp(Ity_V128
);
6491 t4
= newTemp(Ity_V128
);
6492 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
6493 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
6494 assign(t4
, binop(Iop_CmpGT16Ux8
, mkexpr(t1
), mkexpr(t2
)));
6495 assign(t3
, binop(Iop_OrV128
,
6500 unop(Iop_NotV128
, mkexpr(t4
)),
6502 putWReg(wd
, mkexpr(t3
));
6506 case 0x02: { /* MAX_A.W */
6507 DIP("MAX_A.W w%d, w%d, w%d", wd
, ws
, wt
);
6508 t1
= newTemp(Ity_V128
);
6509 t2
= newTemp(Ity_V128
);
6510 t3
= newTemp(Ity_V128
);
6511 t4
= newTemp(Ity_V128
);
6512 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
6513 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
6514 assign(t4
, binop(Iop_CmpGT32Ux4
, mkexpr(t1
), mkexpr(t2
)));
6515 assign(t3
, binop(Iop_OrV128
,
6520 unop(Iop_NotV128
, mkexpr(t4
)),
6522 putWReg(wd
, mkexpr(t3
));
6526 case 0x03: { /* MAX_A.D */
6527 DIP("MAX_A.D w%d, w%d, w%d", wd
, ws
, wt
);
6528 t1
= newTemp(Ity_V128
);
6529 t2
= newTemp(Ity_V128
);
6530 t3
= newTemp(Ity_V128
);
6531 t4
= newTemp(Ity_V128
);
6532 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
6533 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
6534 assign(t4
, binop(Iop_CmpGT64Ux2
, mkexpr(t1
), mkexpr(t2
)));
6535 assign(t3
, binop(Iop_OrV128
,
6540 unop(Iop_NotV128
, mkexpr(t4
)),
6542 putWReg(wd
, mkexpr(t3
));
6553 case 0x07: { /* MIN_A.df */
6555 case 0x00: { /* MIN_A.B */
6556 DIP("MIN_A.B w%d, w%d, w%d", wd
, ws
, wt
);
6557 t1
= newTemp(Ity_V128
);
6558 t2
= newTemp(Ity_V128
);
6559 t3
= newTemp(Ity_V128
);
6560 t4
= newTemp(Ity_V128
);
6561 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
6562 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
6563 assign(t4
, binop(Iop_OrV128
,
6564 binop(Iop_CmpGT8Ux16
,
6565 mkexpr(t1
), mkexpr(t2
)),
6566 binop(Iop_CmpEQ8x16
,
6567 mkexpr(t1
), mkexpr(t2
))));
6568 assign(t3
, binop(Iop_OrV128
,
6573 unop(Iop_NotV128
, mkexpr(t4
)),
6575 putWReg(wd
, mkexpr(t3
));
6579 case 0x01: { /* MIN_A.H */
6580 DIP("MIN_A.H w%d, w%d, w%d", wd
, ws
, wt
);
6581 t1
= newTemp(Ity_V128
);
6582 t2
= newTemp(Ity_V128
);
6583 t3
= newTemp(Ity_V128
);
6584 t4
= newTemp(Ity_V128
);
6585 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
6586 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
6587 assign(t4
, binop(Iop_OrV128
,
6588 binop(Iop_CmpGT16Ux8
,
6589 mkexpr(t1
), mkexpr(t2
)),
6590 binop(Iop_CmpEQ16x8
,
6591 mkexpr(t1
), mkexpr(t2
))));
6592 assign(t3
, binop(Iop_OrV128
,
6597 unop(Iop_NotV128
, mkexpr(t4
)),
6599 putWReg(wd
, mkexpr(t3
));
6603 case 0x02: { /* MIN_A.W */
6604 DIP("MIN_A.W w%d, w%d, w%d", wd
, ws
, wt
);
6605 t1
= newTemp(Ity_V128
);
6606 t2
= newTemp(Ity_V128
);
6607 t3
= newTemp(Ity_V128
);
6608 t4
= newTemp(Ity_V128
);
6609 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
6610 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
6611 assign(t4
, binop(Iop_OrV128
,
6612 binop(Iop_CmpGT32Ux4
,
6613 mkexpr(t1
), mkexpr(t2
)),
6614 binop(Iop_CmpEQ32x4
,
6615 mkexpr(t1
), mkexpr(t2
))));
6616 assign(t3
, binop(Iop_OrV128
,
6621 unop(Iop_NotV128
, mkexpr(t4
)),
6623 putWReg(wd
, mkexpr(t3
));
6627 case 0x03: { /* MIN_A.D */
6628 DIP("MIN_A.D w%d, w%d, w%d", wd
, ws
, wt
);
6629 t1
= newTemp(Ity_V128
);
6630 t2
= newTemp(Ity_V128
);
6631 t3
= newTemp(Ity_V128
);
6632 t4
= newTemp(Ity_V128
);
6633 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
6634 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
6635 assign(t4
, binop(Iop_OrV128
,
6636 binop(Iop_CmpGT64Ux2
,
6637 mkexpr(t1
), mkexpr(t2
)),
6638 binop(Iop_CmpEQ64x2
,
6639 mkexpr(t1
), mkexpr(t2
))));
6640 assign(t3
, binop(Iop_OrV128
,
6645 unop(Iop_NotV128
, mkexpr(t4
)),
6647 putWReg(wd
, mkexpr(t3
));
6665 static Int
msa_3R_0F(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x0F) */
6671 operation
= (cins
& 0x03800000) >> 23;
6672 df
= (cins
& 0x00600000) >> 21;
6673 wt
= (cins
& 0x001F0000) >> 16;
6675 switch (operation
) {
6676 case 0x00: { /* CEQ.df */
6678 case 0x00: { /* CEQ.B */
6679 DIP("CEQ.B w%d, w%d, w%d", wd
, ws
, wt
);
6680 t1
= newTemp(Ity_V128
);
6681 t2
= newTemp(Ity_V128
);
6682 t3
= newTemp(Ity_V128
);
6683 assign(t1
, getWReg(ws
));
6684 assign(t2
, getWReg(wt
));
6685 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
6686 putWReg(wd
, mkexpr(t3
));
6690 case 0x01: { /* CEQ.H */
6691 DIP("CEQ.H w%d, w%d, w%d", wd
, ws
, wt
);
6692 t1
= newTemp(Ity_V128
);
6693 t2
= newTemp(Ity_V128
);
6694 t3
= newTemp(Ity_V128
);
6695 assign(t1
, getWReg(ws
));
6696 assign(t2
, getWReg(wt
));
6697 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
6698 putWReg(wd
, mkexpr(t3
));
6702 case 0x02: { /* CEQ.W */
6703 DIP("CEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
6704 t1
= newTemp(Ity_V128
);
6705 t2
= newTemp(Ity_V128
);
6706 t3
= newTemp(Ity_V128
);
6707 assign(t1
, getWReg(ws
));
6708 assign(t2
, getWReg(wt
));
6709 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
6710 putWReg(wd
, mkexpr(t3
));
6714 case 0x03: { /* CEQ.D */
6715 DIP("CEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
6716 t1
= newTemp(Ity_V128
);
6717 t2
= newTemp(Ity_V128
);
6718 t3
= newTemp(Ity_V128
);
6719 assign(t1
, getWReg(ws
));
6720 assign(t2
, getWReg(wt
));
6721 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
6722 putWReg(wd
, mkexpr(t3
));
6733 case 0x02: { /* CLT_S.df */
6735 case 0x00: { /* CLT_S.B */
6736 DIP("CLT_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6737 t1
= newTemp(Ity_V128
);
6738 t2
= newTemp(Ity_V128
);
6739 t3
= newTemp(Ity_V128
);
6740 assign(t1
, getWReg(ws
));
6741 assign(t2
, getWReg(wt
));
6742 assign(t3
, binop(Iop_CmpGT8Sx16
, mkexpr(t2
), mkexpr(t1
)));
6743 putWReg(wd
, mkexpr(t3
));
6747 case 0x01: { /* CLT_S.H */
6748 DIP("CLT_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6749 t1
= newTemp(Ity_V128
);
6750 t2
= newTemp(Ity_V128
);
6751 t3
= newTemp(Ity_V128
);
6752 assign(t1
, getWReg(ws
));
6753 assign(t2
, getWReg(wt
));
6754 assign(t3
, binop(Iop_CmpGT16Sx8
, mkexpr(t2
), mkexpr(t1
)));
6755 putWReg(wd
, mkexpr(t3
));
6759 case 0x02: { /* CLT_S.W */
6760 DIP("CLT_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6761 t1
= newTemp(Ity_V128
);
6762 t2
= newTemp(Ity_V128
);
6763 t3
= newTemp(Ity_V128
);
6764 assign(t1
, getWReg(ws
));
6765 assign(t2
, getWReg(wt
));
6766 assign(t3
, binop(Iop_CmpGT32Sx4
, mkexpr(t2
), mkexpr(t1
)));
6767 putWReg(wd
, mkexpr(t3
));
6771 case 0x03: { /* CLT_S.D */
6772 DIP("CLT_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6773 t1
= newTemp(Ity_V128
);
6774 t2
= newTemp(Ity_V128
);
6775 t3
= newTemp(Ity_V128
);
6776 assign(t1
, getWReg(ws
));
6777 assign(t2
, getWReg(wt
));
6778 assign(t3
, binop(Iop_CmpGT64Sx2
, mkexpr(t2
), mkexpr(t1
)));
6779 putWReg(wd
, mkexpr(t3
));
6790 case 0x03: { /* CLT_U.df */
6792 case 0x00: { /* CLT_U.B */
6793 DIP("CLT_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6794 t1
= newTemp(Ity_V128
);
6795 t2
= newTemp(Ity_V128
);
6796 t3
= newTemp(Ity_V128
);
6797 assign(t1
, getWReg(ws
));
6798 assign(t2
, getWReg(wt
));
6799 assign(t3
, binop(Iop_CmpGT8Ux16
, mkexpr(t2
), mkexpr(t1
)));
6800 putWReg(wd
, mkexpr(t3
));
6804 case 0x01: { /* CLT_U.H */
6805 DIP("CLT_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6806 t1
= newTemp(Ity_V128
);
6807 t2
= newTemp(Ity_V128
);
6808 t3
= newTemp(Ity_V128
);
6809 assign(t1
, getWReg(ws
));
6810 assign(t2
, getWReg(wt
));
6811 assign(t3
, binop(Iop_CmpGT16Ux8
, mkexpr(t2
), mkexpr(t1
)));
6812 putWReg(wd
, mkexpr(t3
));
6816 case 0x02: { /* CLT_U.W */
6817 DIP("CLT_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6818 t1
= newTemp(Ity_V128
);
6819 t2
= newTemp(Ity_V128
);
6820 t3
= newTemp(Ity_V128
);
6821 assign(t1
, getWReg(ws
));
6822 assign(t2
, getWReg(wt
));
6823 assign(t3
, binop(Iop_CmpGT32Ux4
, mkexpr(t2
), mkexpr(t1
)));
6824 putWReg(wd
, mkexpr(t3
));
6828 case 0x03: { /* CLT_U.D */
6829 DIP("CLT_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6830 t1
= newTemp(Ity_V128
);
6831 t2
= newTemp(Ity_V128
);
6832 t3
= newTemp(Ity_V128
);
6833 assign(t1
, getWReg(ws
));
6834 assign(t2
, getWReg(wt
));
6835 assign(t3
, binop(Iop_CmpGT64Ux2
, mkexpr(t2
), mkexpr(t1
)));
6836 putWReg(wd
, mkexpr(t3
));
6847 case 0x04: { /* CLE_S.df */
6849 case 0x00: { /* CLE_S.B */
6850 DIP("CLE_S.B w%d, w%d, w%d", wd
, ws
, wt
);
6851 t1
= newTemp(Ity_V128
);
6852 t2
= newTemp(Ity_V128
);
6853 t3
= newTemp(Ity_V128
);
6854 assign(t1
, getWReg(ws
));
6855 assign(t2
, getWReg(wt
));
6856 assign(t3
, binop(Iop_OrV128
,
6857 binop(Iop_CmpGT8Sx16
,
6858 mkexpr(t2
), mkexpr(t1
)),
6859 binop(Iop_CmpEQ8x16
,
6860 mkexpr(t1
), mkexpr(t2
))));
6861 putWReg(wd
, mkexpr(t3
));
6865 case 0x01: { /* CLE_S.H */
6866 DIP("CLE_S.H w%d, w%d, w%d", wd
, ws
, wt
);
6867 t1
= newTemp(Ity_V128
);
6868 t2
= newTemp(Ity_V128
);
6869 t3
= newTemp(Ity_V128
);
6870 assign(t1
, getWReg(ws
));
6871 assign(t2
, getWReg(wt
));
6872 assign(t3
, binop(Iop_OrV128
,
6873 binop(Iop_CmpGT16Sx8
,
6874 mkexpr(t2
), mkexpr(t1
)),
6875 binop(Iop_CmpEQ16x8
,
6876 mkexpr(t1
), mkexpr(t2
))));
6877 putWReg(wd
, mkexpr(t3
));
6881 case 0x02: { /* CLE_S.W */
6882 DIP("CLE_S.W w%d, w%d, w%d", wd
, ws
, wt
);
6883 t1
= newTemp(Ity_V128
);
6884 t2
= newTemp(Ity_V128
);
6885 t3
= newTemp(Ity_V128
);
6886 assign(t1
, getWReg(ws
));
6887 assign(t2
, getWReg(wt
));
6888 assign(t3
, binop(Iop_OrV128
,
6889 binop(Iop_CmpGT32Sx4
,
6890 mkexpr(t2
), mkexpr(t1
)),
6891 binop(Iop_CmpEQ32x4
,
6892 mkexpr(t1
), mkexpr(t2
))));
6893 putWReg(wd
, mkexpr(t3
));
6897 case 0x03: { /* CLE_S.D */
6898 DIP("CLE_S.D w%d, w%d, w%d", wd
, ws
, wt
);
6899 t1
= newTemp(Ity_V128
);
6900 t2
= newTemp(Ity_V128
);
6901 t3
= newTemp(Ity_V128
);
6902 assign(t1
, getWReg(ws
));
6903 assign(t2
, getWReg(wt
));
6904 assign(t3
, binop(Iop_OrV128
,
6905 binop(Iop_CmpGT64Sx2
,
6906 mkexpr(t2
), mkexpr(t1
)),
6907 binop(Iop_CmpEQ64x2
,
6908 mkexpr(t1
), mkexpr(t2
))));
6909 putWReg(wd
, mkexpr(t3
));
6920 case 0x05: { /* CLE_U.df */
6922 case 0x00: { /* CLE_U.B */
6923 DIP("CLE_U.B w%d, w%d, w%d", wd
, ws
, wt
);
6924 t1
= newTemp(Ity_V128
);
6925 t2
= newTemp(Ity_V128
);
6926 t3
= newTemp(Ity_V128
);
6927 assign(t1
, getWReg(ws
));
6928 assign(t2
, getWReg(wt
));
6929 assign(t3
, binop(Iop_OrV128
,
6930 binop(Iop_CmpGT8Ux16
,
6931 mkexpr(t2
), mkexpr(t1
)),
6932 binop(Iop_CmpEQ8x16
,
6933 mkexpr(t1
), mkexpr(t2
))));
6934 putWReg(wd
, mkexpr(t3
));
6938 case 0x01: { /* CLE_U.H */
6939 DIP("CLE_U.H w%d, w%d, w%d", wd
, ws
, wt
);
6940 t1
= newTemp(Ity_V128
);
6941 t2
= newTemp(Ity_V128
);
6942 t3
= newTemp(Ity_V128
);
6943 assign(t1
, getWReg(ws
));
6944 assign(t2
, getWReg(wt
));
6945 assign(t3
, binop(Iop_OrV128
,
6946 binop(Iop_CmpGT16Ux8
,
6947 mkexpr(t2
), mkexpr(t1
)),
6948 binop(Iop_CmpEQ16x8
,
6949 mkexpr(t1
), mkexpr(t2
))));
6950 putWReg(wd
, mkexpr(t3
));
6954 case 0x02: { /* CLE_U.W */
6955 DIP("CLE_U.W w%d, w%d, w%d", wd
, ws
, wt
);
6956 t1
= newTemp(Ity_V128
);
6957 t2
= newTemp(Ity_V128
);
6958 t3
= newTemp(Ity_V128
);
6959 assign(t1
, getWReg(ws
));
6960 assign(t2
, getWReg(wt
));
6961 assign(t3
, binop(Iop_OrV128
,
6962 binop(Iop_CmpGT32Ux4
,
6963 mkexpr(t2
), mkexpr(t1
)),
6964 binop(Iop_CmpEQ32x4
,
6965 mkexpr(t1
), mkexpr(t2
))));
6966 putWReg(wd
, mkexpr(t3
));
6970 case 0x03: { /* CLE_U.D */
6971 DIP("CLE_U.D w%d, w%d, w%d", wd
, ws
, wt
);
6972 t1
= newTemp(Ity_V128
);
6973 t2
= newTemp(Ity_V128
);
6974 t3
= newTemp(Ity_V128
);
6975 assign(t1
, getWReg(ws
));
6976 assign(t2
, getWReg(wt
));
6979 binop(Iop_CmpGT64Ux2
,
6980 mkexpr(t2
), mkexpr(t1
)),
6981 binop(Iop_CmpEQ64x2
,
6982 mkexpr(t1
), mkexpr(t2
))));
6983 putWReg(wd
, mkexpr(t3
));
7001 static Int
msa_3R_10(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x10) */
7003 IRTemp t1
, t2
, t3
, t4
;
7007 operation
= (cins
& 0x03800000) >> 23;
7008 df
= (cins
& 0x00600000) >> 21;
7009 wt
= (cins
& 0x001F0000) >> 16;
7011 switch (operation
) {
7012 case 0x00: { /* ADD_A.df */
7014 case 0x00: { /* ADD_A.B */
7015 DIP("ADD_A.B w%d, w%d, w%d", wd
, ws
, wt
);
7016 t1
= newTemp(Ity_V128
);
7017 t2
= newTemp(Ity_V128
);
7018 t3
= newTemp(Ity_V128
);
7019 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
7020 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
7021 assign(t3
, binop(Iop_Add8x16
, mkexpr(t1
), mkexpr(t2
)));
7022 putWReg(wd
, mkexpr(t3
));
7026 case 0x01: { /* ADD_A.H */
7027 DIP("ADD_A.H w%d, w%d, w%d", wd
, ws
, wt
);
7028 t1
= newTemp(Ity_V128
);
7029 t2
= newTemp(Ity_V128
);
7030 t3
= newTemp(Ity_V128
);
7031 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
7032 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
7033 assign(t3
, binop(Iop_Add16x8
, mkexpr(t1
), mkexpr(t2
)));
7034 putWReg(wd
, mkexpr(t3
));
7038 case 0x02: { /* ADD_A.W */
7039 DIP("ADD_A.W w%d, w%d, w%d", wd
, ws
, wt
);
7040 t1
= newTemp(Ity_V128
);
7041 t2
= newTemp(Ity_V128
);
7042 t3
= newTemp(Ity_V128
);
7043 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
7044 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
7045 assign(t3
, binop(Iop_Add32x4
, mkexpr(t1
), mkexpr(t2
)));
7046 putWReg(wd
, mkexpr(t3
));
7050 case 0x03: { /* ADD_A.D */
7051 DIP("ADD_A.D w%d, w%d, w%d", wd
, ws
, wt
);
7052 t1
= newTemp(Ity_V128
);
7053 t2
= newTemp(Ity_V128
);
7054 t3
= newTemp(Ity_V128
);
7055 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
7056 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
7057 assign(t3
, binop(Iop_Add64x2
, mkexpr(t1
), mkexpr(t2
)));
7058 putWReg(wd
, mkexpr(t3
));
7069 case 0x01: { /* ADDS_A.df */
7071 case 0x00: { /* ADDS_A.B */
7072 DIP("ADDS_A.B w%d, w%d, w%d", wd
, ws
, wt
);
7073 t1
= newTemp(Ity_V128
);
7074 t2
= newTemp(Ity_V128
);
7075 t3
= newTemp(Ity_V128
);
7076 t4
= newTemp(Ity_V128
);
7077 assign(t1
, unop(Iop_Abs8x16
, getWReg(ws
)));
7078 assign(t2
, unop(Iop_Abs8x16
, getWReg(wt
)));
7079 assign(t3
, binop(Iop_SarN8x16
,
7084 assign(t4
, binop(Iop_SarN8x16
,
7089 putWReg(wd
, binop(Iop_QAdd8Sx16
,
7111 case 0x01: { /* ADDS_A.H */
7112 DIP("ADDS_A.H w%d, w%d, w%d", wd
, ws
, wt
);
7113 t1
= newTemp(Ity_V128
);
7114 t2
= newTemp(Ity_V128
);
7115 t3
= newTemp(Ity_V128
);
7116 t4
= newTemp(Ity_V128
);
7117 assign(t1
, unop(Iop_Abs16x8
, getWReg(ws
)));
7118 assign(t2
, unop(Iop_Abs16x8
, getWReg(wt
)));
7119 assign(t3
, binop(Iop_SarN16x8
,
7124 assign(t4
, binop(Iop_SarN16x8
,
7129 putWReg(wd
, binop(Iop_QAdd16Sx8
,
7151 case 0x02: { /* ADDS_A.W */
7152 DIP("ADDS_A.W w%d, w%d, w%d", wd
, ws
, wt
);
7153 t1
= newTemp(Ity_V128
);
7154 t2
= newTemp(Ity_V128
);
7155 t3
= newTemp(Ity_V128
);
7156 t4
= newTemp(Ity_V128
);
7157 assign(t1
, unop(Iop_Abs32x4
, getWReg(ws
)));
7158 assign(t2
, unop(Iop_Abs32x4
, getWReg(wt
)));
7159 assign(t3
, binop(Iop_SarN32x4
,
7164 assign(t4
, binop(Iop_SarN32x4
,
7169 putWReg(wd
, binop(Iop_QAdd32Sx4
,
7191 case 0x03: { /* ADDS_A.D */
7192 DIP("ADDS_A.D w%d, w%d, w%d", wd
, ws
, wt
);
7193 t1
= newTemp(Ity_V128
);
7194 t2
= newTemp(Ity_V128
);
7195 t3
= newTemp(Ity_V128
);
7196 t4
= newTemp(Ity_V128
);
7197 assign(t1
, unop(Iop_Abs64x2
, getWReg(ws
)));
7198 assign(t2
, unop(Iop_Abs64x2
, getWReg(wt
)));
7199 assign(t3
, binop(Iop_SarN64x2
,
7204 assign(t4
, binop(Iop_SarN64x2
,
7210 binop(Iop_QAdd64Sx2
,
7239 case 0x02: { /* ADDS_S.df */
7241 case 0x00: { /* ADDS_S.B */
7242 DIP("ADDS_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7243 t1
= newTemp(Ity_V128
);
7244 t2
= newTemp(Ity_V128
);
7245 t3
= newTemp(Ity_V128
);
7246 assign(t1
, getWReg(ws
));
7247 assign(t2
, getWReg(wt
));
7248 assign(t3
, binop(Iop_QAdd8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7249 putWReg(wd
, mkexpr(t3
));
7253 case 0x01: { /* ADDS_S.H */
7254 DIP("ADDS_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7255 t1
= newTemp(Ity_V128
);
7256 t2
= newTemp(Ity_V128
);
7257 t3
= newTemp(Ity_V128
);
7258 assign(t1
, getWReg(ws
));
7259 assign(t2
, getWReg(wt
));
7260 assign(t3
, binop(Iop_QAdd16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7261 putWReg(wd
, mkexpr(t3
));
7265 case 0x02: { /* ADDS_S.W */
7266 DIP("ADDS_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7267 t1
= newTemp(Ity_V128
);
7268 t2
= newTemp(Ity_V128
);
7269 t3
= newTemp(Ity_V128
);
7270 assign(t1
, getWReg(ws
));
7271 assign(t2
, getWReg(wt
));
7272 assign(t3
, binop(Iop_QAdd32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7273 putWReg(wd
, mkexpr(t3
));
7277 case 0x03: { /* ADDS_S.D */
7278 DIP("ADDS_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7279 t1
= newTemp(Ity_V128
);
7280 t2
= newTemp(Ity_V128
);
7281 t3
= newTemp(Ity_V128
);
7282 assign(t1
, getWReg(ws
));
7283 assign(t2
, getWReg(wt
));
7284 assign(t3
, binop(Iop_QAdd64Sx2
, mkexpr(t1
), mkexpr(t2
)));
7285 putWReg(wd
, mkexpr(t3
));
7296 case 0x03: { /* ADDS_U.df */
7298 case 0x00: { /* ADDS_U.B */
7299 DIP("ADDS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7300 t1
= newTemp(Ity_V128
);
7301 t2
= newTemp(Ity_V128
);
7302 t3
= newTemp(Ity_V128
);
7303 assign(t1
, getWReg(ws
));
7304 assign(t2
, getWReg(wt
));
7305 assign(t3
, binop(Iop_QAdd8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7306 putWReg(wd
, mkexpr(t3
));
7310 case 0x01: { /* ADDS_U.H */
7311 DIP("ADDS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7312 t1
= newTemp(Ity_V128
);
7313 t2
= newTemp(Ity_V128
);
7314 t3
= newTemp(Ity_V128
);
7315 assign(t1
, getWReg(ws
));
7316 assign(t2
, getWReg(wt
));
7317 assign(t3
, binop(Iop_QAdd16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7318 putWReg(wd
, mkexpr(t3
));
7322 case 0x02: { /* ADDS_U.W */
7323 DIP("ADDS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7324 t1
= newTemp(Ity_V128
);
7325 t2
= newTemp(Ity_V128
);
7326 t3
= newTemp(Ity_V128
);
7327 assign(t1
, getWReg(ws
));
7328 assign(t2
, getWReg(wt
));
7329 assign(t3
, binop(Iop_QAdd32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7330 putWReg(wd
, mkexpr(t3
));
7334 case 0x03: { /* ADDS_U.D */
7335 DIP("ADDS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7336 t1
= newTemp(Ity_V128
);
7337 t2
= newTemp(Ity_V128
);
7338 t3
= newTemp(Ity_V128
);
7339 assign(t1
, getWReg(ws
));
7340 assign(t2
, getWReg(wt
));
7341 assign(t3
, binop(Iop_QAdd64Ux2
, mkexpr(t1
), mkexpr(t2
)));
7342 putWReg(wd
, mkexpr(t3
));
7353 case 0x04: { /* AVE_S.df */
7355 case 0x00: { /* AVE_S.B */
7356 DIP("AVE_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7357 t1
= newTemp(Ity_V128
);
7358 t2
= newTemp(Ity_V128
);
7359 t3
= newTemp(Ity_V128
);
7360 assign(t1
, getWReg(ws
));
7361 assign(t2
, getWReg(wt
));
7362 assign(t3
, binop(Iop_Add8x16
,
7365 mkexpr(t1
), mkU8(1)),
7367 mkexpr(t2
), mkU8(1))),
7375 putWReg(wd
, mkexpr(t3
));
7379 case 0x01: { /* AVE_S.H */
7380 DIP("AVE_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7381 t1
= newTemp(Ity_V128
);
7382 t2
= newTemp(Ity_V128
);
7383 t3
= newTemp(Ity_V128
);
7384 assign(t1
, getWReg(ws
));
7385 assign(t2
, getWReg(wt
));
7390 mkexpr(t1
), mkU8(1)),
7392 mkexpr(t2
), mkU8(1))),
7400 putWReg(wd
, mkexpr(t3
));
7404 case 0x02: { /* AVE_S.W */
7405 DIP("AVE_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7406 t1
= newTemp(Ity_V128
);
7407 t2
= newTemp(Ity_V128
);
7408 t3
= newTemp(Ity_V128
);
7409 assign(t1
, getWReg(ws
));
7410 assign(t2
, getWReg(wt
));
7411 assign(t3
, binop(Iop_Add32x4
,
7414 mkexpr(t1
), mkU8(1)),
7416 mkexpr(t2
), mkU8(1))),
7424 putWReg(wd
, mkexpr(t3
));
7428 case 0x03: { /* AVE_S.D */
7429 DIP("AVE_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7430 t1
= newTemp(Ity_V128
);
7431 t2
= newTemp(Ity_V128
);
7432 t3
= newTemp(Ity_V128
);
7433 assign(t1
, getWReg(ws
));
7434 assign(t2
, getWReg(wt
));
7435 assign(t3
, binop(Iop_Add64x2
,
7438 mkexpr(t1
), mkU8(1)),
7440 mkexpr(t2
), mkU8(1))),
7448 putWReg(wd
, mkexpr(t3
));
7459 case 0x05: { /* AVE_U.df */
7461 case 0x00: { /* AVE_U.B */
7462 DIP("AVE_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7463 t1
= newTemp(Ity_V128
);
7464 t2
= newTemp(Ity_V128
);
7465 t3
= newTemp(Ity_V128
);
7466 assign(t1
, getWReg(ws
));
7467 assign(t2
, getWReg(wt
));
7468 assign(t3
, binop(Iop_Add16x8
,
7471 mkexpr(t1
), mkU8(1)),
7473 mkexpr(t2
), mkU8(1))),
7481 putWReg(wd
, mkexpr(t3
));
7485 case 0x01: { /* AVE_U.H */
7486 DIP("AVE_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7487 t1
= newTemp(Ity_V128
);
7488 t2
= newTemp(Ity_V128
);
7489 t3
= newTemp(Ity_V128
);
7490 assign(t1
, getWReg(ws
));
7491 assign(t2
, getWReg(wt
));
7492 assign(t3
, binop(Iop_Add16x8
,
7495 mkexpr(t1
), mkU8(1)),
7497 mkexpr(t2
), mkU8(1))),
7505 putWReg(wd
, mkexpr(t3
));
7509 case 0x02: { /* AVE_U.W */
7510 DIP("AVE_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7511 t1
= newTemp(Ity_V128
);
7512 t2
= newTemp(Ity_V128
);
7513 t3
= newTemp(Ity_V128
);
7514 assign(t1
, getWReg(ws
));
7515 assign(t2
, getWReg(wt
));
7516 assign(t3
, binop(Iop_Add32x4
,
7519 mkexpr(t1
), mkU8(1)),
7521 mkexpr(t2
), mkU8(1))),
7529 putWReg(wd
, mkexpr(t3
));
7533 case 0x03: { /* AVE_U.D */
7534 DIP("AVE_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7535 t1
= newTemp(Ity_V128
);
7536 t2
= newTemp(Ity_V128
);
7537 t3
= newTemp(Ity_V128
);
7538 assign(t1
, getWReg(ws
));
7539 assign(t2
, getWReg(wt
));
7540 assign(t3
, binop(Iop_Add64x2
,
7543 mkexpr(t1
), mkU8(1)),
7545 mkexpr(t2
), mkU8(1))),
7553 putWReg(wd
, mkexpr(t3
));
7564 case 0x06: { /* AVER_S.df */
7566 case 0x00: { /* AVER_S.B */
7567 DIP("AVER_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7568 t1
= newTemp(Ity_V128
);
7569 t2
= newTemp(Ity_V128
);
7570 t3
= newTemp(Ity_V128
);
7571 assign(t1
, getWReg(ws
));
7572 assign(t2
, getWReg(wt
));
7573 assign(t3
, binop(Iop_Avg8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7574 putWReg(wd
, mkexpr(t3
));
7578 case 0x01: { /* AVER_S.H */
7579 DIP("AVER_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7580 t1
= newTemp(Ity_V128
);
7581 t2
= newTemp(Ity_V128
);
7582 t3
= newTemp(Ity_V128
);
7583 assign(t1
, getWReg(ws
));
7584 assign(t2
, getWReg(wt
));
7585 assign(t3
, binop(Iop_Avg16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7586 putWReg(wd
, mkexpr(t3
));
7590 case 0x02: { /* AVER_S.W */
7591 DIP("AVER_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7592 t1
= newTemp(Ity_V128
);
7593 t2
= newTemp(Ity_V128
);
7594 t3
= newTemp(Ity_V128
);
7595 assign(t1
, getWReg(ws
));
7596 assign(t2
, getWReg(wt
));
7597 assign(t3
, binop(Iop_Avg32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7598 putWReg(wd
, mkexpr(t3
));
7602 case 0x03: { /* AVER_S.D */
7603 DIP("AVER_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7604 t1
= newTemp(Ity_V128
);
7605 t2
= newTemp(Ity_V128
);
7606 t3
= newTemp(Ity_V128
);
7607 assign(t1
, getWReg(ws
));
7608 assign(t2
, getWReg(wt
));
7609 assign(t3
, binop(Iop_Add64x2
,
7612 mkexpr(t1
), mkU8(1)),
7614 mkexpr(t2
), mkU8(1))),
7622 putWReg(wd
, mkexpr(t3
));
7633 case 0x07: { /* AVER_U.df */
7635 case 0x00: { /* AVER_U.B */
7636 DIP("AVER_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7637 t1
= newTemp(Ity_V128
);
7638 t2
= newTemp(Ity_V128
);
7639 t3
= newTemp(Ity_V128
);
7640 assign(t1
, getWReg(ws
));
7641 assign(t2
, getWReg(wt
));
7642 assign(t3
, binop(Iop_Avg8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7643 putWReg(wd
, mkexpr(t3
));
7647 case 0x01: { /* AVER_U.H */
7648 DIP("AVER_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7649 t1
= newTemp(Ity_V128
);
7650 t2
= newTemp(Ity_V128
);
7651 t3
= newTemp(Ity_V128
);
7652 assign(t1
, getWReg(ws
));
7653 assign(t2
, getWReg(wt
));
7654 assign(t3
, binop(Iop_Avg16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7655 putWReg(wd
, mkexpr(t3
));
7659 case 0x02: { /* AVER_U.W */
7660 DIP("AVER_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7661 t1
= newTemp(Ity_V128
);
7662 t2
= newTemp(Ity_V128
);
7663 t3
= newTemp(Ity_V128
);
7664 assign(t1
, getWReg(ws
));
7665 assign(t2
, getWReg(wt
));
7666 assign(t3
, binop(Iop_Avg32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7667 putWReg(wd
, mkexpr(t3
));
7671 case 0x03: { /* AVER_U.D */
7672 DIP("AVER_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7673 t1
= newTemp(Ity_V128
);
7674 t2
= newTemp(Ity_V128
);
7675 t3
= newTemp(Ity_V128
);
7676 assign(t1
, getWReg(ws
));
7677 assign(t2
, getWReg(wt
));
7678 assign(t3
, binop(Iop_Add64x2
,
7681 mkexpr(t1
), mkU8(1)),
7683 mkexpr(t2
), mkU8(1))),
7691 putWReg(wd
, mkexpr(t3
));
7709 static Int
msa_3R_11(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x11) */
7715 operation
= (cins
& 0x03800000) >> 23;
7716 df
= (cins
& 0x00600000) >> 21;
7717 wt
= (cins
& 0x001F0000) >> 16;
7719 switch (operation
) {
7720 case 0x00: { /* SUBS_S.df */
7722 case 0x00: { /* SUBS_S.B */
7723 DIP("SUBS_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7724 t1
= newTemp(Ity_V128
);
7725 t2
= newTemp(Ity_V128
);
7726 t3
= newTemp(Ity_V128
);
7727 assign(t1
, getWReg(ws
));
7728 assign(t2
, getWReg(wt
));
7729 assign(t3
, binop(Iop_QSub8Sx16
, mkexpr(t1
), mkexpr(t2
)));
7730 putWReg(wd
, mkexpr(t3
));
7734 case 0x01: { /* SUBS_S.H */
7735 DIP("SUBS_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7736 t1
= newTemp(Ity_V128
);
7737 t2
= newTemp(Ity_V128
);
7738 t3
= newTemp(Ity_V128
);
7739 assign(t1
, getWReg(ws
));
7740 assign(t2
, getWReg(wt
));
7741 assign(t3
, binop(Iop_QSub16Sx8
, mkexpr(t1
), mkexpr(t2
)));
7742 putWReg(wd
, mkexpr(t3
));
7746 case 0x02: { /* SUBS_S.W */
7747 DIP("SUBS_S.W w%d, w%d, w%d", wd
, ws
, wt
);
7748 t1
= newTemp(Ity_V128
);
7749 t2
= newTemp(Ity_V128
);
7750 t3
= newTemp(Ity_V128
);
7751 assign(t1
, getWReg(ws
));
7752 assign(t2
, getWReg(wt
));
7753 assign(t3
, binop(Iop_QSub32Sx4
, mkexpr(t1
), mkexpr(t2
)));
7754 putWReg(wd
, mkexpr(t3
));
7758 case 0x03: { /* SUBS_S.D */
7759 DIP("SUBS_S.D w%d, w%d, w%d", wd
, ws
, wt
);
7760 t1
= newTemp(Ity_V128
);
7761 t2
= newTemp(Ity_V128
);
7762 t3
= newTemp(Ity_V128
);
7763 assign(t1
, getWReg(ws
));
7764 assign(t2
, getWReg(wt
));
7765 assign(t3
, binop(Iop_QSub64Sx2
, mkexpr(t1
), mkexpr(t2
)));
7766 putWReg(wd
, mkexpr(t3
));
7777 case 0x01: { /* SUBS_U.df */
7779 case 0x00: { /* SUBS_U.B */
7780 DIP("SUBS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7781 t1
= newTemp(Ity_V128
);
7782 t2
= newTemp(Ity_V128
);
7783 t3
= newTemp(Ity_V128
);
7784 assign(t1
, getWReg(ws
));
7785 assign(t2
, getWReg(wt
));
7786 assign(t3
, binop(Iop_QSub8Ux16
, mkexpr(t1
), mkexpr(t2
)));
7787 putWReg(wd
, mkexpr(t3
));
7791 case 0x01: { /* SUBS_U.H */
7792 DIP("SUBS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7793 t1
= newTemp(Ity_V128
);
7794 t2
= newTemp(Ity_V128
);
7795 t3
= newTemp(Ity_V128
);
7796 assign(t1
, getWReg(ws
));
7797 assign(t2
, getWReg(wt
));
7798 assign(t3
, binop(Iop_QSub16Ux8
, mkexpr(t1
), mkexpr(t2
)));
7799 putWReg(wd
, mkexpr(t3
));
7803 case 0x02: { /* SUBS_U.W */
7804 DIP("SUBS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7805 t1
= newTemp(Ity_V128
);
7806 t2
= newTemp(Ity_V128
);
7807 t3
= newTemp(Ity_V128
);
7808 assign(t1
, getWReg(ws
));
7809 assign(t2
, getWReg(wt
));
7810 assign(t3
, binop(Iop_QSub32Ux4
, mkexpr(t1
), mkexpr(t2
)));
7811 putWReg(wd
, mkexpr(t3
));
7815 case 0x03: { /* SUBS_U.D */
7816 DIP("SUBS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7817 t1
= newTemp(Ity_V128
);
7818 t2
= newTemp(Ity_V128
);
7819 t3
= newTemp(Ity_V128
);
7820 assign(t1
, getWReg(ws
));
7821 assign(t2
, getWReg(wt
));
7822 assign(t3
, binop(Iop_QSub64Ux2
, mkexpr(t1
), mkexpr(t2
)));
7823 putWReg(wd
, mkexpr(t3
));
7834 case 0x02: { /* SUBSUS_U.df */
7836 case 0x00: { /* SUBSUS_U.B */
7837 DIP("SUBSUS_U.B w%d, w%d, w%d", wd
, ws
, wt
);
7838 t1
= newTemp(Ity_V128
);
7839 t2
= newTemp(Ity_V128
);
7840 t3
= newTemp(Ity_V128
);
7841 assign(t1
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
7842 assign(t2
, binop(Iop_SarN8x16
, getWReg(wt
), mkU8(7)));
7843 assign(t3
, binop(Iop_OrV128
,
7844 binop(Iop_CmpGT8Ux16
,
7847 binop(Iop_CmpEQ8x16
,
7853 mkexpr(t3
), mkexpr(t2
)),
7862 case 0x01: { /* SUBSUS_U.H */
7863 DIP("SUBSUS_U.H w%d, w%d, w%d", wd
, ws
, wt
);
7864 t1
= newTemp(Ity_V128
);
7865 t2
= newTemp(Ity_V128
);
7866 t3
= newTemp(Ity_V128
);
7867 assign(t1
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
7868 assign(t2
, binop(Iop_SarN16x8
, getWReg(wt
), mkU8(15)));
7871 binop(Iop_CmpGT16Ux8
,
7874 binop(Iop_CmpEQ16x8
,
7880 mkexpr(t3
), mkexpr(t2
)),
7889 case 0x02: { /* SUBSUS_U.W */
7890 DIP("SUBSUS_U.W w%d, w%d, w%d", wd
, ws
, wt
);
7891 t1
= newTemp(Ity_V128
);
7892 t2
= newTemp(Ity_V128
);
7893 t3
= newTemp(Ity_V128
);
7894 assign(t1
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
7895 assign(t2
, binop(Iop_SarN32x4
, getWReg(wt
), mkU8(31)));
7898 binop(Iop_CmpGT32Ux4
,
7901 binop(Iop_CmpEQ32x4
,
7907 mkexpr(t3
), mkexpr(t2
)),
7916 case 0x03: { /* SUBSUS_U.D */
7917 DIP("SUBSUS_U.D w%d, w%d, w%d", wd
, ws
, wt
);
7918 t1
= newTemp(Ity_V128
);
7919 t2
= newTemp(Ity_V128
);
7920 t3
= newTemp(Ity_V128
);
7921 assign(t1
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
7922 assign(t2
, binop(Iop_SarN64x2
, getWReg(wt
), mkU8(63)));
7925 binop(Iop_CmpGT64Ux2
,
7928 binop(Iop_CmpEQ64x2
,
7934 mkexpr(t3
), mkexpr(t2
)),
7950 case 0x03: { /* SUBSUU_S.df */
7952 case 0x00: { /* SUBSUU_S.B */
7953 DIP("SUBSUU_S.B w%d, w%d, w%d", wd
, ws
, wt
);
7954 t1
= newTemp(Ity_V128
);
7955 t2
= newTemp(Ity_V128
);
7956 t3
= newTemp(Ity_V128
);
7957 assign(t1
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
7971 getWReg(ws
), mkU8(7)),
7981 mkexpr(t2
), mkU8(7)),
7986 case 0x01: { /* SUBSUU_S.H */
7987 DIP("SUBSUU_S.H w%d, w%d, w%d", wd
, ws
, wt
);
7988 t1
= newTemp(Ity_V128
);
7989 t2
= newTemp(Ity_V128
);
7990 t3
= newTemp(Ity_V128
);
7991 assign(t1
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
8016 mkexpr(t2
), mkU8(15)),
8021 case 0x02: { /* SUBSUU_S.W */
8022 DIP("SUBSUU_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8023 t1
= newTemp(Ity_V128
);
8024 t2
= newTemp(Ity_V128
);
8025 t3
= newTemp(Ity_V128
);
8026 assign(t1
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
8057 case 0x03: { /* SUBSUU_S.D */
8058 DIP("SUBSUU_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8059 t1
= newTemp(Ity_V128
);
8060 t2
= newTemp(Ity_V128
);
8061 t3
= newTemp(Ity_V128
);
8062 assign(t1
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
8087 mkexpr(t2
), mkU8(63)),
8099 case 0x04: { /* ASUB_S.df */
8101 case 0x00: { /* ASUB_S.B */
8102 DIP("ASUB_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8103 t1
= newTemp(Ity_V128
);
8104 t2
= newTemp(Ity_V128
);
8105 t3
= newTemp(Ity_V128
);
8106 assign(t1
, binop(Iop_SarN8x16
, getWReg(ws
), mkU8(7)));
8107 assign(t2
, binop(Iop_SarN8x16
, getWReg(wt
), mkU8(7)));
8108 assign(t3
, binop(Iop_Sub8x16
, getWReg(ws
), getWReg(wt
)));
8136 case 0x01: { /* ASUB_S.H */
8137 DIP("ASUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8138 t1
= newTemp(Ity_V128
);
8139 t2
= newTemp(Ity_V128
);
8140 t3
= newTemp(Ity_V128
);
8141 assign(t1
, binop(Iop_SarN16x8
, getWReg(ws
), mkU8(15)));
8142 assign(t2
, binop(Iop_SarN16x8
, getWReg(wt
), mkU8(15)));
8143 assign(t3
, binop(Iop_Sub16x8
, getWReg(ws
), getWReg(wt
)));
8171 case 0x02: { /* ASUB_S.W */
8172 DIP("ASUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8173 t1
= newTemp(Ity_V128
);
8174 t2
= newTemp(Ity_V128
);
8175 t3
= newTemp(Ity_V128
);
8176 assign(t1
, binop(Iop_SarN32x4
, getWReg(ws
), mkU8(31)));
8177 assign(t2
, binop(Iop_SarN32x4
, getWReg(wt
), mkU8(31)));
8178 assign(t3
, binop(Iop_Sub32x4
, getWReg(ws
), getWReg(wt
)));
8206 case 0x03: { /* ASUB_S.D */
8207 DIP("ASUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8208 t1
= newTemp(Ity_V128
);
8209 t2
= newTemp(Ity_V128
);
8210 t3
= newTemp(Ity_V128
);
8211 assign(t1
, binop(Iop_SarN64x2
, getWReg(ws
), mkU8(63)));
8212 assign(t2
, binop(Iop_SarN64x2
, getWReg(wt
), mkU8(63)));
8213 assign(t3
, binop(Iop_Sub64x2
, getWReg(ws
), getWReg(wt
)));
8248 case 0x05: { /* ASUB_U.df */
8250 case 0x00: { /* ASUB_U.B */
8251 DIP("ASUB_U.B w%d, w%d, w%d", wd
, ws
, wt
);
8252 t1
= newTemp(Ity_V128
);
8253 t2
= newTemp(Ity_V128
);
8254 t3
= newTemp(Ity_V128
);
8255 assign(t1
, getWReg(ws
));
8256 assign(t2
, getWReg(wt
));
8260 mkexpr(t1
), mkexpr(t2
)),
8265 unop(Iop_NotV128
, mkexpr(t3
)),
8270 binop(Iop_AndV128
, mkexpr(t3
),
8281 case 0x01: { /* ASUB_U.H */
8282 DIP("ASUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
8283 t1
= newTemp(Ity_V128
);
8284 t2
= newTemp(Ity_V128
);
8285 t3
= newTemp(Ity_V128
);
8286 assign(t1
, getWReg(ws
));
8287 assign(t2
, getWReg(wt
));
8291 mkexpr(t1
), mkexpr(t2
)),
8314 case 0x02: { /* ASUB_U.W */
8315 DIP("ASUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
8316 t1
= newTemp(Ity_V128
);
8317 t2
= newTemp(Ity_V128
);
8318 t3
= newTemp(Ity_V128
);
8319 assign(t1
, getWReg(ws
));
8320 assign(t2
, getWReg(wt
));
8324 mkexpr(t1
), mkexpr(t2
)),
8329 unop(Iop_NotV128
, mkexpr(t3
)),
8346 case 0x03: { /* ASUB_U.D */
8347 DIP("ASUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
8348 t1
= newTemp(Ity_V128
);
8349 t2
= newTemp(Ity_V128
);
8350 t3
= newTemp(Ity_V128
);
8351 assign(t1
, getWReg(ws
));
8352 assign(t2
, getWReg(wt
));
8356 mkexpr(t1
), mkexpr(t2
)),
8361 unop(Iop_NotV128
, mkexpr(t3
)),
8392 static Int
msa_3R_12(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x12) */
8394 IRTemp t1
, t2
, t3
, t4
, t5
, t6
;
8398 operation
= (cins
& 0x03800000) >> 23;
8399 df
= (cins
& 0x00600000) >> 21;
8400 wt
= (cins
& 0x001F0000) >> 16;
8402 switch (operation
) {
8403 case 0x00: { /* MULV.df */
8405 case 0x00: { /* MULV.B */
8406 DIP("MULV.B w%d, w%d, w%d", wd
, ws
, wt
);
8407 putWReg(wd
, binop(Iop_Mul8x16
, getWReg(ws
), getWReg(wt
)));
8411 case 0x01: { /* MULV.H */
8412 DIP("MULV.H w%d, w%d, w%d", wd
, ws
, wt
);
8413 putWReg(wd
, binop(Iop_Mul16x8
, getWReg(ws
), getWReg(wt
)));
8417 case 0x02: { /* MULV.W */
8418 DIP("MULV.W w%d, w%d, w%d", wd
, ws
, wt
);
8419 putWReg(wd
, binop(Iop_Mul32x4
, getWReg(ws
), getWReg(wt
)));
8423 case 0x03: { /* MULV.D */
8424 DIP("MULV.D w%d, w%d, w%d", wd
, ws
, wt
);
8425 t1
= newTemp(Ity_V128
);
8426 t2
= newTemp(Ity_V128
);
8427 assign(t1
, getWReg(ws
));
8428 assign(t2
, getWReg(wt
));
8430 binop(Iop_64HLtoV128
,
8432 unop(Iop_V128HIto64
,
8434 unop(Iop_V128HIto64
,
8451 case 0x01: { /* MADDV.df */
8453 case 0x00: { /* MADDV.B */
8454 DIP("MADDV.B w%d, w%d, w%d", wd
, ws
, wt
);
8464 case 0x01: { /* MADDV.H */
8465 DIP("MADDV.H w%d, w%d, w%d", wd
, ws
, wt
);
8475 case 0x02: { /* MADDV.W */
8476 DIP("MADDV.W w%d, w%d, w%d", wd
, ws
, wt
);
8486 case 0x03: { /* MADDV.D */
8487 DIP("MADDV.D w%d, w%d, w%d", wd
, ws
, wt
);
8488 t1
= newTemp(Ity_V128
);
8489 t2
= newTemp(Ity_V128
);
8490 assign(t1
, getWReg(ws
));
8491 assign(t2
, getWReg(wt
));
8495 binop(Iop_64HLtoV128
,
8497 unop(Iop_V128HIto64
,
8499 unop(Iop_V128HIto64
,
8516 case 0x02: { /* MSUBV.df */
8518 case 0x00: { /* MSUBV.B */
8519 DIP("MSUBV.B w%d, w%d, w%d", wd
, ws
, wt
);
8529 case 0x01: { /* MSUBV.H */
8530 DIP("MSUBV.H w%d, w%d, w%d", wd
, ws
, wt
);
8540 case 0x02: { /* MSUBV.W */
8541 DIP("MSUBV.W w%d, w%d, w%d", wd
, ws
, wt
);
8551 case 0x03: { /* MSUBV.D */
8552 DIP("MSUBV.D w%d, w%d, w%d", wd
, ws
, wt
);
8553 t1
= newTemp(Ity_V128
);
8554 t2
= newTemp(Ity_V128
);
8555 assign(t1
, getWReg(ws
));
8556 assign(t2
, getWReg(wt
));
8560 binop(Iop_64HLtoV128
,
8562 unop(Iop_V128HIto64
,
8564 unop(Iop_V128HIto64
,
8581 case 0x04: { /* DIV_S.df */
8582 t1
= newTemp(Ity_V128
);
8583 t2
= newTemp(Ity_V128
);
8584 assign(t1
, getWReg(ws
));
8585 assign(t2
, getWReg(wt
));
8588 case 0x00: { /* DIV_S.B */
8589 DIP("DIV_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8593 for (i
= 0; i
< 16; i
++) {
8594 tmp
[i
] = newTemp(Ity_I32
);
8601 binop(Iop_GetElem8x16
,
8605 binop(Iop_GetElem8x16
,
8608 mkU8((i
& 3) << 3)));
8612 binop(Iop_64HLtoV128
,
8647 case 0x01: { /* DIV_S.H */
8648 DIP("DIV_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8652 for (i
= 0; i
< 8; i
++) {
8653 tmp
[i
] = newTemp(Ity_I32
);
8660 binop(Iop_GetElem16x8
,
8664 binop(Iop_GetElem16x8
,
8667 mkU8((i
& 1) << 4)));
8671 binop(Iop_64HLtoV128
,
8689 case 0x02: { /* DIV_S.W */
8690 DIP("DIV_S.W w%d, w%d, w%d", wd
, ws
, wt
);
8694 for (i
= 0; i
< 4; i
++) {
8695 tmp
[i
] = newTemp(Ity_I32
);
8698 binop(Iop_GetElem32x4
,
8699 mkexpr(t1
), mkU8(i
)),
8700 binop(Iop_GetElem32x4
,
8701 mkexpr(t2
), mkU8(i
))));
8705 binop(Iop_64HLtoV128
, \
8715 case 0x03: { /* DIV_S.D */
8716 DIP("DIV_S.D w%d, w%d, w%d", wd
, ws
, wt
);
8718 binop(Iop_64HLtoV128
,
8720 unop(Iop_V128HIto64
,
8722 unop(Iop_V128HIto64
,
8739 case 0x05: { /* DIV_U.df */
8740 t1
= newTemp(Ity_V128
);
8741 t2
= newTemp(Ity_V128
);
8742 assign(t1
, getWReg(ws
));
8743 assign(t2
, getWReg(wt
));
8746 case 0x00: { /* DIV_U.B */
8747 DIP("DIV_U.B w%d, w%d, w%d", wd
, ws
, wt
);
8751 for (i
= 0; i
< 16; i
++) {
8752 tmp
[i
] = newTemp(Ity_I32
);
8759 binop(Iop_GetElem8x16
,
8763 binop(Iop_GetElem8x16
,
8766 mkU8((i
& 3) << 3)));
8770 binop(Iop_64HLtoV128
,
8805 case 0x01: { /* DIV_U.H */
8806 DIP("DIV_U.H w%d, w%d, w%d", wd
, ws
, wt
);
8810 for (i
= 0; i
< 8; i
++) {
8811 tmp
[i
] = newTemp(Ity_I32
);
8818 binop(Iop_GetElem16x8
,
8822 binop(Iop_GetElem16x8
,
8825 mkU8((i
& 1) << 4)));
8829 binop(Iop_64HLtoV128
,
8847 case 0x02: { /* DIV_U.W */
8848 DIP("DIV_U.W w%d, w%d, w%d", wd
, ws
, wt
);
8852 for (i
= 0; i
< 4; i
++) {
8853 tmp
[i
] = newTemp(Ity_I32
);
8856 binop(Iop_GetElem32x4
,
8857 mkexpr(t1
), mkU8(i
)),
8858 binop(Iop_GetElem32x4
,
8859 mkexpr(t2
), mkU8(i
))));
8863 binop(Iop_64HLtoV128
,
8873 case 0x03: { /* DIV_U.D */
8874 DIP("DIV_U.D w%d, w%d, w%d", wd
, ws
, wt
);
8876 binop(Iop_64HLtoV128
,
8878 unop(Iop_V128HIto64
,
8880 unop(Iop_V128HIto64
,
8897 case 0x06: { /* MOD_S.df */
8898 t1
= newTemp(Ity_V128
);
8899 t2
= newTemp(Ity_V128
);
8900 assign(t1
, getWReg(ws
));
8901 assign(t2
, getWReg(wt
));
8904 case 0x00: { /* MOD_S.B */
8905 DIP("MOD_S.B w%d, w%d, w%d", wd
, ws
, wt
);
8909 for (i
= 0; i
< 16; i
++) {
8910 tmp
[i
] = newTemp(Ity_I32
);
8916 binop(Iop_DivModS32to32
,
8918 binop(Iop_GetElem8x16
,
8922 binop(Iop_GetElem8x16
,
8925 mkU8((i
& 3) << 3)));
8929 binop(Iop_64HLtoV128
,
8959 mkexpr(tmp
[0])))))));
8963 case 0x01: { /* MOD_S.H */
8964 DIP("MOD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
8968 for (i
= 0; i
< 8; i
++) {
8969 tmp
[i
] = newTemp(Ity_I32
);
8975 binop(Iop_DivModS32to32
,
8977 binop(Iop_GetElem16x8
,
8981 binop(Iop_GetElem16x8
,
8984 mkU8((i
& 1) << 4)));
8988 binop(Iop_64HLtoV128
,
9006 case 0x02: { /* MOD_S.W */
9007 DIP("MOD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9011 for (i
= 0; i
< 4; i
++) {
9012 tmp
[i
] = newTemp(Ity_I32
);
9015 binop(Iop_DivModS32to32
,
9016 binop(Iop_GetElem32x4
,
9019 binop(Iop_GetElem32x4
,
9025 binop(Iop_64HLtoV128
,
9035 case 0x03: { /* MOD_S.D */
9036 DIP("MOD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9037 t3
= newTemp(Ity_I64
);
9038 t4
= newTemp(Ity_I64
);
9039 t5
= newTemp(Ity_I64
);
9040 t6
= newTemp(Ity_I64
);
9041 assign(t3
, unop(Iop_V128HIto64
, mkexpr(t1
)));
9042 assign(t4
, unop(Iop_V128HIto64
, mkexpr(t2
)));
9043 assign(t5
, unop(Iop_V128to64
, mkexpr(t1
)));
9044 assign(t6
, unop(Iop_V128to64
, mkexpr(t2
)));
9046 binop(Iop_64HLtoV128
,
9071 case 0x07: { /* MOD_U.df */
9072 t1
= newTemp(Ity_V128
);
9073 t2
= newTemp(Ity_V128
);
9074 assign(t1
, getWReg(ws
));
9075 assign(t2
, getWReg(wt
));
9078 case 0x00: { /* MOD_U.B */
9079 DIP("MOD_U.B w%d, w%d, w%d", wd
, ws
, wt
);
9083 for (i
= 0; i
< 16; i
++) {
9084 tmp
[i
] = newTemp(Ity_I32
);
9090 binop(Iop_DivModU32to32
,
9092 binop(Iop_GetElem8x16
,
9096 binop(Iop_GetElem8x16
,
9099 mkU8((i
& 3) << 3)));
9103 binop(Iop_64HLtoV128
,
9133 mkexpr(tmp
[0])))))));
9137 case 0x01: { /* MOD_U.H */
9138 DIP("MOD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9142 for (i
= 0; i
< 8; i
++) {
9143 tmp
[i
] = newTemp(Ity_I32
);
9149 binop(Iop_DivModU32to32
,
9151 binop(Iop_GetElem16x8
,
9155 binop(Iop_GetElem16x8
,
9158 mkU8((i
& 1) << 4)));
9162 binop(Iop_64HLtoV128
,
9180 case 0x02: { /* MOD_U.W */
9181 DIP("MOD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9185 for (i
= 0; i
< 4; i
++) {
9186 tmp
[i
] = newTemp(Ity_I32
);
9189 binop(Iop_DivModU32to32
,
9190 binop(Iop_GetElem32x4
,
9193 binop(Iop_GetElem32x4
,
9199 binop(Iop_64HLtoV128
,
9209 case 0x03: { /* MOD_U.D */
9210 DIP("MOD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9211 t3
= newTemp(Ity_I64
);
9212 t4
= newTemp(Ity_I64
);
9213 t5
= newTemp(Ity_I64
);
9214 t6
= newTemp(Ity_I64
);
9215 assign(t3
, unop(Iop_V128HIto64
, mkexpr(t1
)));
9216 assign(t4
, unop(Iop_V128HIto64
, mkexpr(t2
)));
9217 assign(t5
, unop(Iop_V128to64
, mkexpr(t1
)));
9218 assign(t6
, unop(Iop_V128to64
, mkexpr(t2
)));
9220 binop(Iop_64HLtoV128
,
9252 static Int
msa_3R_13(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x13) */
9258 operation
= (cins
& 0x03800000) >> 23;
9259 df
= (cins
& 0x00600000) >> 21;
9260 wt
= (cins
& 0x001F0000) >> 16;
9262 switch (operation
) {
9263 case 0x00: { /* DOTP_S.df */
9264 t1
= newTemp(Ity_V128
);
9265 t2
= newTemp(Ity_V128
);
9266 assign(t1
, getWReg(ws
));
9267 assign(t2
, getWReg(wt
));
9270 case 0x01: { /* DOTP_S.H */
9271 DIP("DOTP_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9275 for (i
= 0; i
< 8; i
++) {
9276 tmp
[i
] = newTemp(Ity_I16
);
9280 binop(Iop_GetElem8x16
,
9283 binop(Iop_GetElem8x16
,
9287 binop(Iop_GetElem8x16
,
9290 binop(Iop_GetElem8x16
,
9292 mkU8(2 * i
+ 1)))));
9296 binop(Iop_64HLtoV128
,
9314 case 0x02: { /* DOTP_S.W */
9315 DIP("DOTP_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9319 for (i
= 0; i
< 4; i
++) {
9320 tmp
[i
] = newTemp(Ity_I32
);
9324 binop(Iop_GetElem16x8
,
9327 binop(Iop_GetElem16x8
,
9331 binop(Iop_GetElem16x8
,
9334 binop(Iop_GetElem16x8
,
9336 mkU8(2 * i
+ 1)))));
9340 binop(Iop_64HLtoV128
,
9350 case 0x03: { /* DOTP_S.D */
9351 DIP("DOTP_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9355 for (i
= 0; i
< 2; i
++) {
9356 tmp
[i
] = newTemp(Ity_I64
);
9360 binop(Iop_GetElem32x4
,
9363 binop(Iop_GetElem32x4
,
9367 binop(Iop_GetElem32x4
,
9370 binop(Iop_GetElem32x4
,
9372 mkU8(2 * i
+ 1)))));
9376 binop(Iop_64HLtoV128
,
9377 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
9388 case 0x01: { /* DOTP_U.df */
9389 t1
= newTemp(Ity_V128
);
9390 t2
= newTemp(Ity_V128
);
9391 assign(t1
, getWReg(ws
));
9392 assign(t2
, getWReg(wt
));
9395 case 0x01: { /* DOTP_U.H */
9396 DIP("DOTP_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9400 for (i
= 0; i
< 8; i
++) {
9401 tmp
[i
] = newTemp(Ity_I16
);
9405 binop(Iop_GetElem8x16
,
9408 binop(Iop_GetElem8x16
,
9412 binop(Iop_GetElem8x16
,
9415 binop(Iop_GetElem8x16
,
9417 mkU8(2 * i
+ 1)))));
9421 binop(Iop_64HLtoV128
,
9439 case 0x02: { /* DOTP_U.W */
9440 DIP("DOTP_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9444 for (i
= 0; i
< 4; i
++) {
9445 tmp
[i
] = newTemp(Ity_I32
);
9449 binop(Iop_GetElem16x8
,
9452 binop(Iop_GetElem16x8
,
9456 binop(Iop_GetElem16x8
,
9459 binop(Iop_GetElem16x8
,
9461 mkU8(2 * i
+ 1)))));
9465 binop(Iop_64HLtoV128
,
9475 case 0x03: { /* DOTP_U.D */
9476 DIP("DOTP_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9480 for (i
= 0; i
< 2; i
++) {
9481 tmp
[i
] = newTemp(Ity_I64
);
9485 binop(Iop_GetElem32x4
,
9488 binop(Iop_GetElem32x4
,
9492 binop(Iop_GetElem32x4
,
9495 binop(Iop_GetElem32x4
,
9497 mkU8(2 * i
+ 1)))));
9501 binop(Iop_64HLtoV128
,
9502 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
9513 case 0x02: { /* DPADD_S.df */
9514 t1
= newTemp(Ity_V128
);
9515 t2
= newTemp(Ity_V128
);
9516 assign(t1
, getWReg(ws
));
9517 assign(t2
, getWReg(wt
));
9520 case 0x01: { /* DPADD_S.H */
9521 DIP("DPADD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9525 for (i
= 0; i
< 8; i
++) {
9526 tmp
[i
] = newTemp(Ity_I16
);
9530 binop(Iop_GetElem8x16
,
9533 binop(Iop_GetElem8x16
,
9537 binop(Iop_GetElem8x16
,
9540 binop(Iop_GetElem8x16
,
9542 mkU8(2 * i
+ 1)))));
9548 binop(Iop_64HLtoV128
,
9562 mkexpr(tmp
[0]))))));
9566 case 0x02: { /* DPADD_S.W */
9567 DIP("DPADD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9571 for (i
= 0; i
< 4; i
++) {
9572 tmp
[i
] = newTemp(Ity_I32
);
9576 binop(Iop_GetElem16x8
,
9579 binop(Iop_GetElem16x8
,
9583 binop(Iop_GetElem16x8
,
9586 binop(Iop_GetElem16x8
,
9588 mkU8(2 * i
+ 1)))));
9594 binop(Iop_64HLtoV128
,
9604 case 0x03: { /* DPADD_S.D */
9605 DIP("DPADD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9609 for (i
= 0; i
< 2; i
++) {
9610 tmp
[i
] = newTemp(Ity_I64
);
9614 binop(Iop_GetElem32x4
,
9617 binop(Iop_GetElem32x4
,
9621 binop(Iop_GetElem32x4
,
9624 binop(Iop_GetElem32x4
,
9626 mkU8(2 * i
+ 1)))));
9632 binop(Iop_64HLtoV128
,
9645 case 0x03: { /* DPADD_U.df */
9646 t1
= newTemp(Ity_V128
);
9647 t2
= newTemp(Ity_V128
);
9648 assign(t1
, getWReg(ws
));
9649 assign(t2
, getWReg(wt
));
9652 case 0x01: { /* DPADD_U.H */
9653 DIP("DPADD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9657 for (i
= 0; i
< 8; i
++) {
9658 tmp
[i
] = newTemp(Ity_I16
);
9662 binop(Iop_GetElem8x16
,
9665 binop(Iop_GetElem8x16
,
9669 binop(Iop_GetElem8x16
,
9672 binop(Iop_GetElem8x16
,
9674 mkU8(2 * i
+ 1)))));
9680 binop(Iop_64HLtoV128
,
9694 mkexpr(tmp
[0]))))));
9698 case 0x02: { /* DPADD_U.W */
9699 DIP("DPADD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9703 for (i
= 0; i
< 4; i
++) {
9704 tmp
[i
] = newTemp(Ity_I32
);
9708 binop(Iop_GetElem16x8
,
9711 binop(Iop_GetElem16x8
,
9715 binop(Iop_GetElem16x8
,
9718 binop(Iop_GetElem16x8
,
9720 mkU8(2 * i
+ 1)))));
9726 binop(Iop_64HLtoV128
,
9736 case 0x03: { /* DPADD_U.D */
9737 DIP("DPADD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
9741 for (i
= 0; i
< 2; i
++) {
9742 tmp
[i
] = newTemp(Ity_I64
);
9746 binop(Iop_GetElem32x4
,
9749 binop(Iop_GetElem32x4
,
9753 binop(Iop_GetElem32x4
,
9756 binop(Iop_GetElem32x4
,
9758 mkU8(2 * i
+ 1)))));
9764 binop(Iop_64HLtoV128
,
9777 case 0x04: { /* DPSUB_S.df */
9778 t1
= newTemp(Ity_V128
);
9779 t2
= newTemp(Ity_V128
);
9780 assign(t1
, getWReg(ws
));
9781 assign(t2
, getWReg(wt
));
9784 case 0x01: { /* DPSUB_S.H */
9785 DIP("DPSUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
9789 for (i
= 0; i
< 8; i
++) {
9790 tmp
[i
] = newTemp(Ity_I16
);
9794 binop(Iop_GetElem8x16
,
9797 binop(Iop_GetElem8x16
,
9801 binop(Iop_GetElem8x16
,
9804 binop(Iop_GetElem8x16
,
9806 mkU8(2 * i
+ 1)))));
9812 binop(Iop_64HLtoV128
,
9826 mkexpr(tmp
[0]))))));
9830 case 0x02: { /* DPSUB_S.W */
9831 DIP("DPSUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
9835 for (i
= 0; i
< 4; i
++) {
9836 tmp
[i
] = newTemp(Ity_I32
);
9840 binop(Iop_GetElem16x8
,
9843 binop(Iop_GetElem16x8
,
9847 binop(Iop_GetElem16x8
,
9850 binop(Iop_GetElem16x8
,
9852 mkU8(2 * i
+ 1)))));
9858 binop(Iop_64HLtoV128
,
9868 case 0x03: { /* DPSUB_S.D */
9869 DIP("DPSUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
9873 for (i
= 0; i
< 2; i
++) {
9874 tmp
[i
] = newTemp(Ity_I64
);
9878 binop(Iop_GetElem32x4
,
9881 binop(Iop_GetElem32x4
,
9885 binop(Iop_GetElem32x4
,
9888 binop(Iop_GetElem32x4
,
9890 mkU8(2 * i
+ 1)))));
9896 binop(Iop_64HLtoV128
,
9909 case 0x05: { /* DPSUB_U.df */
9910 t1
= newTemp(Ity_V128
);
9911 t2
= newTemp(Ity_V128
);
9912 assign(t1
, getWReg(ws
));
9913 assign(t2
, getWReg(wt
));
9916 case 0x01: { /* DPSUB_U.H */
9917 DIP("DPSUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
9921 for (i
= 0; i
< 8; i
++) {
9922 tmp
[i
] = newTemp(Ity_I16
);
9926 binop(Iop_GetElem8x16
,
9929 binop(Iop_GetElem8x16
,
9933 binop(Iop_GetElem8x16
,
9936 binop(Iop_GetElem8x16
,
9938 mkU8(2 * i
+ 1)))));
9944 binop(Iop_64HLtoV128
,
9958 mkexpr(tmp
[0]))))));
9962 case 0x02: { /* DPSUB_U.W */
9963 DIP("DPSUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
9967 for (i
= 0; i
< 4; i
++) {
9968 tmp
[i
] = newTemp(Ity_I32
);
9972 binop(Iop_GetElem16x8
,
9975 binop(Iop_GetElem16x8
,
9979 binop(Iop_GetElem16x8
,
9982 binop(Iop_GetElem16x8
,
9984 mkU8(2 * i
+ 1)))));
9990 binop(Iop_64HLtoV128
,
10000 case 0x03: { /* DPSUB_U.D */
10001 DIP("DPSUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
10005 for (i
= 0; i
< 2; i
++) {
10006 tmp
[i
] = newTemp(Ity_I64
);
10010 binop(Iop_GetElem32x4
,
10013 binop(Iop_GetElem32x4
,
10017 binop(Iop_GetElem32x4
,
10020 binop(Iop_GetElem32x4
,
10022 mkU8(2 * i
+ 1)))));
10028 binop(Iop_64HLtoV128
,
10048 static Int
msa_3R_14(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x14) */
10050 IRTemp t1
, t2
, t3
, t4
;
10055 operation
= (cins
& 0x03800000) >> 23;
10056 df
= (cins
& 0x00600000) >> 21;
10057 wt
= (cins
& 0x001F0000) >> 16;
10058 ty
= mode64
? Ity_I64
: Ity_I32
;
10060 switch (operation
) {
10061 case 0x00: { /* SLD.df */
10064 DIP("SLD.B w%d, w%d[%d]", wd
, ws
, wt
);
10065 t1
= newTemp(Ity_I32
);
10066 t2
= newTemp(Ity_V128
);
10067 t3
= newTemp(Ity_V128
);
10078 unop(Iop_32to8
, mkexpr(t1
))));
10088 mkexpr(t2
), mkexpr(t3
)));
10092 case 0x01: {/* SLD.H */
10093 DIP("SLD.H w%d, w%d[%d]", wd
, ws
, wt
);
10094 t1
= newTemp(Ity_I32
);
10095 t2
= newTemp(Ity_I64
);
10096 t3
= newTemp(Ity_V128
);
10097 t4
= newTemp(Ity_V128
);
10106 binop(Iop_32HLto64
, mkU32(0), mkexpr(t1
)));
10110 binop(Iop_64HLtoV128
,
10111 mkexpr(t2
), mkexpr(t2
))));
10116 binop(Iop_64HLtoV128
,
10119 binop(Iop_64HLtoV128
,
10127 mkexpr(t1
), mkU32(0)),
10129 binop(Iop_64HLtoV128
,
10130 mkU64(0), mkU64(0)))));
10134 case 0x02: {/* SLD.W */
10135 DIP("SLD.W w%d, w%d[%d]", wd
, ws
, wt
);
10136 t1
= newTemp(Ity_I32
);
10137 t2
= newTemp(Ity_I64
);
10138 t3
= newTemp(Ity_V128
);
10139 t4
= newTemp(Ity_V128
);
10148 binop(Iop_32HLto64
,
10149 mkexpr(t1
), mkexpr(t1
)));
10153 binop(Iop_64HLtoV128
,
10154 mkexpr(t2
), mkexpr(t2
))));
10159 binop(Iop_64HLtoV128
,
10160 mkU64(0x2000000020ul
),
10161 mkU64(0x2000000020ul
)),
10162 binop(Iop_64HLtoV128
,
10170 mkexpr(t1
), mkU32(0)),
10172 binop(Iop_64HLtoV128
,
10173 mkU64(0), mkU64(0)))));
10177 case 0x03: { /* SLD.D */
10178 DIP("SLD.D w%d, w%d[%d]", wd
, ws
, wt
);
10179 t1
= newTemp(Ity_I32
);
10180 t2
= newTemp(Ity_I64
);
10181 t3
= newTemp(Ity_V128
);
10182 t4
= newTemp(Ity_V128
);
10191 binop(Iop_32HLto64
,
10195 mkexpr(t1
), mkU8(16))),
10199 mkexpr(t1
), mkU8(16)))));
10203 binop(Iop_64HLtoV128
,
10204 mkexpr(t2
), mkexpr(t2
))));
10209 binop(Iop_64HLtoV128
,
10210 mkU64(0x10001000100010ul
),
10211 mkU64(0x10001000100010ul
)),
10212 binop(Iop_64HLtoV128
,
10220 mkexpr(t1
), mkU32(0)),
10222 binop(Iop_64HLtoV128
,
10223 mkU64(0), mkU64(0)))));
10231 case 0x01: { /* SPLAT.df */
10235 case 0x00: { /* SPLAT.B */
10236 DIP("SPLAT.B w%d, w%d, w%d", wd
, ws
, wt
);
10237 t1
= newTemp(Ity_V128
);
10238 t2
= newTemp(Ity_I32
);
10239 assign(t1
, getWReg(ws
));
10241 mkNarrowTo32(ty
, getIReg(wt
)));
10244 for (i
= 0; i
< 16; i
++) {
10245 tmp
[i
] = newTemp(Ity_I8
);
10247 binop(Iop_GetElem8x16
,
10249 unop(Iop_32to8
, mkexpr(t2
))));
10253 binop(Iop_64HLtoV128
,
10254 binop(Iop_32HLto64
,
10255 binop(Iop_16HLto32
,
10262 binop(Iop_16HLto32
,
10269 binop(Iop_32HLto64
,
10270 binop(Iop_16HLto32
,
10277 binop(Iop_16HLto32
,
10283 mkexpr(tmp
[0]))))));
10287 case 0x01: { /* SPLAT.H */
10288 DIP("SPLAT.H w%d, w%d, w%d", wd
, ws
, wt
);
10289 t1
= newTemp(Ity_V128
);
10290 t2
= newTemp(Ity_I32
);
10291 assign(t1
, getWReg(ws
));
10293 mkNarrowTo32(ty
, getIReg(wt
)));
10296 for (i
= 0; i
< 8; i
++) {
10297 tmp
[i
] = newTemp(Ity_I16
);
10299 binop(Iop_GetElem16x8
,
10301 unop(Iop_32to8
, mkexpr(t2
))));
10305 binop(Iop_64HLtoV128
,
10306 binop(Iop_32HLto64
,
10307 binop(Iop_16HLto32
,
10310 binop(Iop_16HLto32
,
10313 binop(Iop_32HLto64
,
10314 binop(Iop_16HLto32
,
10317 binop(Iop_16HLto32
,
10319 mkexpr(tmp
[0])))));
10323 case 0x02: { /* SPLAT.W */
10324 DIP("SPLAT.W w%d, w%d, w%d", wd
, ws
, wt
);
10325 t1
= newTemp(Ity_V128
);
10326 t2
= newTemp(Ity_I32
);
10327 assign(t1
, getWReg(ws
));
10329 mkNarrowTo32(ty
, getIReg(wt
)));
10332 for (i
= 0; i
< 4; i
++) {
10333 tmp
[i
] = newTemp(Ity_I32
);
10335 binop(Iop_GetElem32x4
,
10337 unop(Iop_32to8
, mkexpr(t2
))));
10341 binop(Iop_64HLtoV128
,
10342 binop(Iop_32HLto64
,
10345 binop(Iop_32HLto64
,
10351 case 0x03: { /* SPLAT.D */
10352 DIP("SPLAT.D w%d, w%d, w%d", wd
, ws
, wt
);
10353 t1
= newTemp(Ity_V128
);
10354 t2
= newTemp(Ity_I32
);
10355 assign(t1
, getWReg(ws
));
10357 mkNarrowTo32(ty
, getIReg(wt
)));
10360 for (i
= 0; i
< 2; i
++) {
10361 tmp
[i
] = newTemp(Ity_I64
);
10363 binop(Iop_GetElem64x2
,
10365 unop(Iop_32to8
, mkexpr(t2
))));
10369 binop(Iop_64HLtoV128
,
10370 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
10378 case 0x02: { /* PCKEV.df */
10380 case 0x00: { /* PCKEV.B */
10381 DIP("PCKEV.B w%d, w%d, w%d", wd
, ws
, wt
);
10382 t1
= newTemp(Ity_V128
);
10383 t2
= newTemp(Ity_V128
);
10384 t3
= newTemp(Ity_V128
);
10385 assign(t1
, getWReg(ws
));
10386 assign(t2
, getWReg(wt
));
10388 binop(Iop_PackEvenLanes8x16
,
10389 mkexpr(t1
), mkexpr(t2
)));
10390 putWReg(wd
, mkexpr(t3
));
10394 case 0x01: { /* PCKEV.H */
10395 DIP("PCKEV.H w%d, w%d, w%d", wd
, ws
, wt
);
10396 t1
= newTemp(Ity_V128
);
10397 t2
= newTemp(Ity_V128
);
10398 t3
= newTemp(Ity_V128
);
10399 assign(t1
, getWReg(ws
));
10400 assign(t2
, getWReg(wt
));
10402 binop(Iop_PackEvenLanes16x8
,
10403 mkexpr(t1
), mkexpr(t2
)));
10404 putWReg(wd
, mkexpr(t3
));
10408 case 0x02: { /* PCKEV.W */
10409 DIP("PCKEV.W w%d, w%d, w%d", wd
, ws
, wt
);
10410 t1
= newTemp(Ity_V128
);
10411 t2
= newTemp(Ity_V128
);
10412 t3
= newTemp(Ity_V128
);
10413 assign(t1
, getWReg(ws
));
10414 assign(t2
, getWReg(wt
));
10416 binop(Iop_PackEvenLanes32x4
,
10417 mkexpr(t1
), mkexpr(t2
)));
10418 putWReg(wd
, mkexpr(t3
));
10422 case 0x03: { /* PCKEV.D */
10423 DIP("PCKEV.D w%d, w%d, w%d", wd
, ws
, wt
);
10424 t1
= newTemp(Ity_V128
);
10425 t2
= newTemp(Ity_V128
);
10426 t3
= newTemp(Ity_V128
);
10427 assign(t1
, getWReg(ws
));
10428 assign(t2
, getWReg(wt
));
10430 binop(Iop_InterleaveLO64x2
,
10431 mkexpr(t1
), mkexpr(t2
)));
10432 putWReg(wd
, mkexpr(t3
));
10443 case 0x03: { /* PCKOD.df */
10445 case 0x00: { /* PCKOD.B */
10446 DIP("PCKOD.B w%d, w%d, w%d", wd
, ws
, wt
);
10447 t1
= newTemp(Ity_V128
);
10448 t2
= newTemp(Ity_V128
);
10449 t3
= newTemp(Ity_V128
);
10450 assign(t1
, getWReg(ws
));
10451 assign(t2
, getWReg(wt
));
10453 binop(Iop_PackOddLanes8x16
,
10454 mkexpr(t1
), mkexpr(t2
)));
10455 putWReg(wd
, mkexpr(t3
));
10459 case 0x01: { /* PCKOD.H */
10460 DIP("PCKOD.H w%d, w%d, w%d", wd
, ws
, wt
);
10461 t1
= newTemp(Ity_V128
);
10462 t2
= newTemp(Ity_V128
);
10463 t3
= newTemp(Ity_V128
);
10464 assign(t1
, getWReg(ws
));
10465 assign(t2
, getWReg(wt
));
10467 binop(Iop_PackOddLanes16x8
,
10468 mkexpr(t1
), mkexpr(t2
)));
10469 putWReg(wd
, mkexpr(t3
));
10473 case 0x02: { /* PCKOD.W */
10474 DIP("PCKOD.W w%d, w%d, w%d", wd
, ws
, wt
);
10475 t1
= newTemp(Ity_V128
);
10476 t2
= newTemp(Ity_V128
);
10477 t3
= newTemp(Ity_V128
);
10478 assign(t1
, getWReg(ws
));
10479 assign(t2
, getWReg(wt
));
10481 binop(Iop_PackOddLanes32x4
,
10482 mkexpr(t1
), mkexpr(t2
)));
10483 putWReg(wd
, mkexpr(t3
));
10487 case 0x03: { /* PCKOD.D */
10488 DIP("PCKOD.D w%d, w%d, w%d", wd
, ws
, wt
);
10489 t1
= newTemp(Ity_V128
);
10490 t2
= newTemp(Ity_V128
);
10491 t3
= newTemp(Ity_V128
);
10492 assign(t1
, getWReg(ws
));
10493 assign(t2
, getWReg(wt
));
10495 binop(Iop_InterleaveHI64x2
,
10496 mkexpr(t1
), mkexpr(t2
)));
10497 putWReg(wd
, mkexpr(t3
));
10508 case 0x04: { /* ILVL.df */
10510 case 0x00: { /* ILVL.B */
10511 DIP("ILVL.B w%d, w%d, w%d", wd
, ws
, wt
);
10512 t1
= newTemp(Ity_V128
);
10513 t2
= newTemp(Ity_V128
);
10514 t3
= newTemp(Ity_V128
);
10515 assign(t1
, getWReg(ws
));
10516 assign(t2
, getWReg(wt
));
10518 binop(Iop_InterleaveHI8x16
,
10519 mkexpr(t1
), mkexpr(t2
)));
10520 putWReg(wd
, mkexpr(t3
));
10524 case 0x01: { /* ILVL.H */
10525 DIP("ILVL.H w%d, w%d, w%d", wd
, ws
, wt
);
10526 t1
= newTemp(Ity_V128
);
10527 t2
= newTemp(Ity_V128
);
10528 t3
= newTemp(Ity_V128
);
10529 assign(t1
, getWReg(ws
));
10530 assign(t2
, getWReg(wt
));
10532 binop(Iop_InterleaveHI16x8
,
10533 mkexpr(t1
), mkexpr(t2
)));
10534 putWReg(wd
, mkexpr(t3
));
10538 case 0x02: { /* ILVL.W */
10539 DIP("ILVL.W w%d, w%d, w%d", wd
, ws
, wt
);
10540 t1
= newTemp(Ity_V128
);
10541 t2
= newTemp(Ity_V128
);
10542 t3
= newTemp(Ity_V128
);
10543 assign(t1
, getWReg(ws
));
10544 assign(t2
, getWReg(wt
));
10546 binop(Iop_InterleaveHI32x4
,
10547 mkexpr(t1
), mkexpr(t2
)));
10548 putWReg(wd
, mkexpr(t3
));
10552 case 0x03: { /* ILVL.D */
10553 DIP("ILVL.D w%d, w%d, w%d", wd
, ws
, wt
);
10554 t1
= newTemp(Ity_V128
);
10555 t2
= newTemp(Ity_V128
);
10556 t3
= newTemp(Ity_V128
);
10557 assign(t1
, getWReg(ws
));
10558 assign(t2
, getWReg(wt
));
10560 binop(Iop_InterleaveHI64x2
,
10561 mkexpr(t1
), mkexpr(t2
)));
10562 putWReg(wd
, mkexpr(t3
));
10573 case 0x05: { /* ILVR.df */
10575 case 0x00: { /* ILVL.B */
10576 DIP("ILVL.B w%d, w%d, w%d", wd
, ws
, wt
);
10577 t1
= newTemp(Ity_V128
);
10578 t2
= newTemp(Ity_V128
);
10579 t3
= newTemp(Ity_V128
);
10580 assign(t1
, getWReg(ws
));
10581 assign(t2
, getWReg(wt
));
10583 binop(Iop_InterleaveLO8x16
,
10584 mkexpr(t1
), mkexpr(t2
)));
10585 putWReg(wd
, mkexpr(t3
));
10589 case 0x01: { /* ILVL.H */
10590 DIP("ILVL.H w%d, w%d, w%d", wd
, ws
, wt
);
10591 t1
= newTemp(Ity_V128
);
10592 t2
= newTemp(Ity_V128
);
10593 t3
= newTemp(Ity_V128
);
10594 assign(t1
, getWReg(ws
));
10595 assign(t2
, getWReg(wt
));
10597 binop(Iop_InterleaveLO16x8
,
10598 mkexpr(t1
), mkexpr(t2
)));
10599 putWReg(wd
, mkexpr(t3
));
10603 case 0x02: { /* ILVL.W */
10604 DIP("ILVL.W w%d, w%d, w%d", wd
, ws
, wt
);
10605 t1
= newTemp(Ity_V128
);
10606 t2
= newTemp(Ity_V128
);
10607 t3
= newTemp(Ity_V128
);
10608 assign(t1
, getWReg(ws
));
10609 assign(t2
, getWReg(wt
));
10611 binop(Iop_InterleaveLO32x4
,
10612 mkexpr(t1
), mkexpr(t2
)));
10613 putWReg(wd
, mkexpr(t3
));
10617 case 0x03: { /* ILVL.D */
10618 DIP("ILVL.D w%d, w%d, w%d", wd
, ws
, wt
);
10619 t1
= newTemp(Ity_V128
);
10620 t2
= newTemp(Ity_V128
);
10621 t3
= newTemp(Ity_V128
);
10622 assign(t1
, getWReg(ws
));
10623 assign(t2
, getWReg(wt
));
10625 binop(Iop_InterleaveLO64x2
,
10626 mkexpr(t1
), mkexpr(t2
)));
10627 putWReg(wd
, mkexpr(t3
));
10635 case 0x06: { /* ILVEV.df */
10637 case 0x00: { /* ILVEV.B */
10638 DIP("ILVEV.B w%d, w%d, w%d", wd
, ws
, wt
);
10639 t1
= newTemp(Ity_V128
);
10640 t2
= newTemp(Ity_V128
);
10641 t3
= newTemp(Ity_V128
);
10642 assign(t1
, getWReg(ws
));
10643 assign(t2
, getWReg(wt
));
10645 binop(Iop_InterleaveEvenLanes8x16
,
10646 mkexpr(t1
), mkexpr(t2
)));
10647 putWReg(wd
, mkexpr(t3
));
10651 case 0x01: { /* ILVEV.H */
10652 DIP("ILVEV.H w%d, w%d, w%d", wd
, ws
, wt
);
10653 t1
= newTemp(Ity_V128
);
10654 t2
= newTemp(Ity_V128
);
10655 t3
= newTemp(Ity_V128
);
10656 assign(t1
, getWReg(ws
));
10657 assign(t2
, getWReg(wt
));
10659 binop(Iop_InterleaveEvenLanes16x8
,
10660 mkexpr(t1
), mkexpr(t2
)));
10661 putWReg(wd
, mkexpr(t3
));
10665 case 0x02: { /* ILVEV.W */
10666 DIP("ILVEV.W w%d, w%d, w%d", wd
, ws
, wt
);
10667 t1
= newTemp(Ity_V128
);
10668 t2
= newTemp(Ity_V128
);
10669 t3
= newTemp(Ity_V128
);
10670 assign(t1
, getWReg(ws
));
10671 assign(t2
, getWReg(wt
));
10673 binop(Iop_InterleaveEvenLanes32x4
,
10674 mkexpr(t1
), mkexpr(t2
)));
10675 putWReg(wd
, mkexpr(t3
));
10679 case 0x03: { /* ILVEV.D */
10680 DIP("ILVEV.D w%d, w%d, w%d", wd
, ws
, wt
);
10681 t1
= newTemp(Ity_V128
);
10682 t2
= newTemp(Ity_V128
);
10683 t3
= newTemp(Ity_V128
);
10684 assign(t1
, getWReg(ws
));
10685 assign(t2
, getWReg(wt
));
10687 binop(Iop_InterleaveLO64x2
,
10688 mkexpr(t1
), mkexpr(t2
)));
10689 putWReg(wd
, mkexpr(t3
));
10700 case 0x07: { /* ILVOD.df */
10702 case 0x00: { /* ILVOD.B */
10703 DIP("ILVOD.B w%d, w%d, w%d", wd
, ws
, wt
);
10704 t1
= newTemp(Ity_V128
);
10705 t2
= newTemp(Ity_V128
);
10706 t3
= newTemp(Ity_V128
);
10707 assign(t1
, getWReg(ws
));
10708 assign(t2
, getWReg(wt
));
10710 binop(Iop_InterleaveOddLanes8x16
,
10711 mkexpr(t1
), mkexpr(t2
)));
10712 putWReg(wd
, mkexpr(t3
));
10716 case 0x01: { /* ILVOD.H */
10717 DIP("ILVOD.H w%d, w%d, w%d", wd
, ws
, wt
);
10718 t1
= newTemp(Ity_V128
);
10719 t2
= newTemp(Ity_V128
);
10720 t3
= newTemp(Ity_V128
);
10721 assign(t1
, getWReg(ws
));
10722 assign(t2
, getWReg(wt
));
10724 binop(Iop_InterleaveOddLanes16x8
,
10725 mkexpr(t1
), mkexpr(t2
)));
10726 putWReg(wd
, mkexpr(t3
));
10730 case 0x02: { /* ILVOD.W */
10731 DIP("ILVOD.W w%d, w%d, w%d", wd
, ws
, wt
);
10732 t1
= newTemp(Ity_V128
);
10733 t2
= newTemp(Ity_V128
);
10734 t3
= newTemp(Ity_V128
);
10735 assign(t1
, getWReg(ws
));
10736 assign(t2
, getWReg(wt
));
10738 binop(Iop_InterleaveOddLanes32x4
,
10739 mkexpr(t1
), mkexpr(t2
)));
10740 putWReg(wd
, mkexpr(t3
));
10744 case 0x03: { /* ILVOD.D */
10745 DIP("ILVOD.D w%d, w%d, w%d", wd
, ws
, wt
);
10746 t1
= newTemp(Ity_V128
);
10747 t2
= newTemp(Ity_V128
);
10748 t3
= newTemp(Ity_V128
);
10749 assign(t1
, getWReg(ws
));
10750 assign(t2
, getWReg(wt
));
10752 binop(Iop_InterleaveHI64x2
,
10753 mkexpr(t1
), mkexpr(t2
)));
10754 putWReg(wd
, mkexpr(t3
));
10772 static Int
msa_3R_15(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x15) */
10774 IRTemp t1
, t2
, t3
, t4
;
10778 operation
= (cins
& 0x03800000) >> 23;
10779 df
= (cins
& 0x00600000) >> 21;
10780 wt
= (cins
& 0x001F0000) >> 16;
10782 switch (operation
) {
10783 case 0x00: { /* VSHF.df */
10784 t1
= newTemp(Ity_V128
);
10785 t2
= newTemp(Ity_V128
);
10786 t3
= newTemp(Ity_V128
);
10787 assign(t1
, getWReg(wd
));
10788 assign(t2
, getWReg(ws
));
10789 assign(t3
, getWReg(wt
));
10792 case 0x00: { /* VSHF.B */
10793 DIP("VSHF.B w%d, w%d, w%d", wd
, ws
, wt
);
10797 for (i
= 0; i
< 16; i
++) {
10798 tmp
[i
] = newTemp(Ity_I8
);
10803 binop(Iop_GetElem8x16
,
10811 binop(Iop_GetElem8x16
,
10816 binop(Iop_GetElem8x16
,
10818 binop(Iop_GetElem8x16
,
10821 binop(Iop_GetElem8x16
,
10823 binop(Iop_GetElem8x16
,
10830 binop(Iop_64HLtoV128
,
10831 binop(Iop_32HLto64
,
10832 binop(Iop_16HLto32
,
10839 binop(Iop_16HLto32
,
10846 binop(Iop_32HLto64
,
10847 binop(Iop_16HLto32
,
10854 binop(Iop_16HLto32
,
10860 mkexpr(tmp
[0]))))));
10864 case 0x01: { /* VSHF.H */
10865 DIP("VSHF.H w%d, w%d, w%d", wd
, ws
, wt
);
10869 for (i
= 0; i
< 8; i
++) {
10870 tmp
[i
] = newTemp(Ity_I16
);
10875 binop(Iop_GetElem16x8
,
10883 binop(Iop_GetElem16x8
,
10888 binop(Iop_GetElem16x8
,
10891 binop(Iop_GetElem16x8
,
10894 binop(Iop_GetElem16x8
,
10897 binop(Iop_GetElem16x8
,
10904 binop(Iop_64HLtoV128
,
10905 binop(Iop_32HLto64
,
10906 binop(Iop_16HLto32
,
10909 binop(Iop_16HLto32
,
10912 binop(Iop_32HLto64
,
10913 binop(Iop_16HLto32
,
10916 binop(Iop_16HLto32
,
10918 mkexpr(tmp
[0])))));
10922 case 0x02: { /* VSHF.W */
10923 DIP("VSHF.W w%d, w%d, w%d", wd
, ws
, wt
);
10927 for (i
= 0; i
< 4; i
++) {
10928 tmp
[i
] = newTemp(Ity_I32
);
10933 binop(Iop_GetElem32x4
,
10941 binop(Iop_GetElem32x4
,
10946 binop(Iop_GetElem32x4
,
10949 binop(Iop_GetElem32x4
,
10952 binop(Iop_GetElem32x4
,
10955 binop(Iop_GetElem32x4
,
10962 binop(Iop_64HLtoV128
,
10963 binop(Iop_32HLto64
,
10966 binop(Iop_32HLto64
,
10972 case 0x03: { /* VSHF.D */
10973 DIP("VSHF.D w%d, w%d, w%d", wd
, ws
, wt
);
10977 for (i
= 0; i
< 2; i
++) {
10978 tmp
[i
] = newTemp(Ity_I64
);
10983 binop(Iop_GetElem64x2
,
10991 binop(Iop_GetElem64x2
,
10996 binop(Iop_GetElem64x2
,
10999 binop(Iop_GetElem64x2
,
11002 binop(Iop_GetElem64x2
,
11005 binop(Iop_GetElem64x2
,
11012 binop(Iop_64HLtoV128
,
11013 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
11024 case 0x01: { /* SRAR.df */
11026 case 0x00: { /* SRAR.B */
11027 DIP("SRAR.B w%d, w%d, w%d", wd
, ws
, wt
);
11028 t1
= newTemp(Ity_V128
);
11029 t2
= newTemp(Ity_V128
);
11030 t3
= newTemp(Ity_V128
);
11031 t4
= newTemp(Ity_V128
);
11038 binop(Iop_64HLtoV128
,
11039 mkU64(0x808080808080808ull
),
11040 mkU64(0x808080808080808ull
)),
11044 binop(Iop_CmpEQ8x16
,
11045 binop(Iop_ShlN8x16
,
11048 binop(Iop_64HLtoV128
,
11049 mkU64(0), mkU64(0)))));
11051 binop(Iop_ShrN8x16
,
11060 mkexpr(t1
), mkexpr(t3
)));
11064 case 0x01: { /* SRAR.H */
11065 DIP("SRAR.H w%d, w%d, w%d", wd
, ws
, wt
);
11066 t1
= newTemp(Ity_V128
);
11067 t2
= newTemp(Ity_V128
);
11068 t3
= newTemp(Ity_V128
);
11069 t4
= newTemp(Ity_V128
);
11076 binop(Iop_64HLtoV128
,
11077 mkU64(0x10001000100010ul
),
11078 mkU64(0x10001000100010ul
)),
11082 binop(Iop_CmpEQ16x8
,
11083 binop(Iop_ShlN16x8
,
11086 binop(Iop_64HLtoV128
,
11087 mkU64(0), mkU64(0)))));
11089 binop(Iop_ShrN16x8
,
11098 mkexpr(t1
), mkexpr(t3
)));
11102 case 0x02: { /* SRAR.W */
11103 DIP("SRAR.W w%d, w%d, w%d", wd
, ws
, wt
);
11104 t1
= newTemp(Ity_V128
); // shifted
11105 t2
= newTemp(Ity_V128
); // 32 - wt
11106 t3
= newTemp(Ity_V128
); // rv
11107 t4
= newTemp(Ity_V128
); // wt % 32 == 0
11114 binop(Iop_64HLtoV128
,
11115 mkU64(0x2000000020ul
),
11116 mkU64(0x2000000020ul
)),
11120 binop(Iop_CmpEQ32x4
,
11121 binop(Iop_ShlN32x4
,
11124 binop(Iop_64HLtoV128
,
11125 mkU64(0), mkU64(0)))));
11127 binop(Iop_ShrN32x4
,
11136 mkexpr(t1
), mkexpr(t3
)));
11140 case 0x03: { /* SRAR.D */
11141 DIP("SRAR.D w%d, w%d, w%d", wd
, ws
, wt
);
11142 t1
= newTemp(Ity_V128
);
11143 t2
= newTemp(Ity_V128
);
11144 t3
= newTemp(Ity_V128
);
11145 t4
= newTemp(Ity_V128
);
11152 binop(Iop_64HLtoV128
,
11153 mkU64(64ul), mkU64(64ul)),
11157 binop(Iop_CmpEQ64x2
,
11158 binop(Iop_ShlN64x2
,
11161 binop(Iop_64HLtoV128
,
11162 mkU64(0), mkU64(0)))));
11164 binop(Iop_ShrN64x2
,
11173 mkexpr(t1
), mkexpr(t3
)));
11184 case 0x02: { /* SRLR.df */
11186 case 0x00: { /* SRLR.B */
11187 DIP("SRLR.B w%d, w%d, w%d", wd
, ws
, wt
);
11188 t1
= newTemp(Ity_V128
);
11189 t2
= newTemp(Ity_V128
);
11190 t3
= newTemp(Ity_V128
);
11191 t4
= newTemp(Ity_V128
);
11198 binop(Iop_64HLtoV128
,
11199 mkU64(0x808080808080808ull
),
11200 mkU64(0x808080808080808ull
)),
11204 binop(Iop_CmpEQ8x16
,
11205 binop(Iop_ShlN8x16
,
11208 binop(Iop_64HLtoV128
,
11209 mkU64(0), mkU64(0)))));
11211 binop(Iop_ShrN8x16
,
11220 mkexpr(t1
), mkexpr(t3
)));
11224 case 0x01: { /* SRLR.H */
11225 DIP("SRLR.H w%d, w%d, w%d", wd
, ws
, wt
);
11226 t1
= newTemp(Ity_V128
);
11227 t2
= newTemp(Ity_V128
);
11228 t3
= newTemp(Ity_V128
);
11229 t4
= newTemp(Ity_V128
);
11236 binop(Iop_64HLtoV128
,
11237 mkU64(0x10001000100010ul
),
11238 mkU64(0x10001000100010ul
)),
11242 binop(Iop_CmpEQ16x8
,
11243 binop(Iop_ShlN16x8
,
11246 binop(Iop_64HLtoV128
,
11247 mkU64(0), mkU64(0)))));
11249 binop(Iop_ShrN16x8
,
11258 mkexpr(t1
), mkexpr(t3
)));
11262 case 0x02: { /* SRLR.W */
11263 DIP("SRLR.W w%d, w%d, w%d", wd
, ws
, wt
);
11264 t1
= newTemp(Ity_V128
);
11265 t2
= newTemp(Ity_V128
);
11266 t3
= newTemp(Ity_V128
);
11267 t4
= newTemp(Ity_V128
);
11274 binop(Iop_64HLtoV128
,
11275 mkU64(0x2000000020ul
),
11276 mkU64(0x2000000020ul
)),
11280 binop(Iop_CmpEQ32x4
,
11281 binop(Iop_ShlN32x4
,
11284 binop(Iop_64HLtoV128
,
11285 mkU64(0), mkU64(0)))));
11287 binop(Iop_ShrN32x4
,
11296 mkexpr(t1
), mkexpr(t3
)));
11300 case 0x03: { /* SRLR.D */
11301 DIP("SRLR.D w%d, w%d, w%d", wd
, ws
, wt
);
11302 t1
= newTemp(Ity_V128
);
11303 t2
= newTemp(Ity_V128
);
11304 t3
= newTemp(Ity_V128
);
11305 t4
= newTemp(Ity_V128
);
11312 binop(Iop_64HLtoV128
,
11313 mkU64(64ul), mkU64(64ul)),
11317 binop(Iop_CmpEQ64x2
,
11318 binop(Iop_ShlN64x2
,
11321 binop(Iop_64HLtoV128
,
11322 mkU64(0), mkU64(0)))));
11324 binop(Iop_ShrN64x2
,
11333 mkexpr(t1
), mkexpr(t3
)));
11344 case 0x04: { /* HADD_S.df */
11346 case 0x01: { /* HADD_S.H */
11347 DIP("HADD_S.H w%d, w%d, w%d", wd
, ws
, wt
);
11348 t1
= newTemp(Ity_V128
);
11349 t2
= newTemp(Ity_V128
);
11350 t3
= newTemp(Ity_V128
);
11351 assign(t1
, getWReg(ws
));
11352 assign(t2
, getWReg(wt
));
11355 binop(Iop_SarN16x8
,
11356 mkexpr(t1
), mkU8(8)),
11357 binop(Iop_SarN16x8
,
11358 binop(Iop_ShlN16x8
,
11359 mkexpr(t2
), mkU8(8)),
11361 putWReg(wd
, mkexpr(t3
));
11365 case 0x02: { /* HADD_S.W */
11366 DIP("HADD_S.W w%d, w%d, w%d", wd
, ws
, wt
);
11367 t1
= newTemp(Ity_V128
);
11368 t2
= newTemp(Ity_V128
);
11369 t3
= newTemp(Ity_V128
);
11370 assign(t1
, getWReg(ws
));
11371 assign(t2
, getWReg(wt
));
11374 binop(Iop_SarN32x4
,
11375 mkexpr(t1
), mkU8(16)),
11376 binop(Iop_SarN32x4
,
11377 binop(Iop_ShlN32x4
,
11378 mkexpr(t2
), mkU8(16)),
11380 putWReg(wd
, mkexpr(t3
));
11384 case 0x03: { /* HADD_S.D */
11385 DIP("HADD_S.D w%d, w%d, w%d", wd
, ws
, wt
);
11386 t1
= newTemp(Ity_V128
);
11387 t2
= newTemp(Ity_V128
);
11388 t3
= newTemp(Ity_V128
);
11389 assign(t1
, getWReg(ws
));
11390 assign(t2
, getWReg(wt
));
11393 binop(Iop_SarN64x2
,
11394 mkexpr(t1
), mkU8(32)),
11395 binop(Iop_SarN64x2
,
11396 binop(Iop_ShlN64x2
,
11397 mkexpr(t2
), mkU8(32)),
11399 putWReg(wd
, mkexpr(t3
));
11410 case 0x05: { /* HADD_U.df */
11412 case 0x01: { /* HADD_U.H */
11413 DIP("HADD_U.H w%d, w%d, w%d", wd
, ws
, wt
);
11414 t1
= newTemp(Ity_V128
);
11415 t2
= newTemp(Ity_V128
);
11416 t3
= newTemp(Ity_V128
);
11417 assign(t1
, getWReg(ws
));
11418 assign(t2
, getWReg(wt
));
11421 binop(Iop_ShrN16x8
,
11422 mkexpr(t1
), mkU8(8)),
11423 binop(Iop_ShrN16x8
,
11424 binop(Iop_ShlN16x8
,
11425 mkexpr(t2
), mkU8(8)),
11427 putWReg(wd
, mkexpr(t3
));
11431 case 0x02: { /* HADD_U.W */
11432 DIP("HADD_U.W w%d, w%d, w%d", wd
, ws
, wt
);
11433 t1
= newTemp(Ity_V128
);
11434 t2
= newTemp(Ity_V128
);
11435 t3
= newTemp(Ity_V128
);
11436 assign(t1
, getWReg(ws
));
11437 assign(t2
, getWReg(wt
));
11440 binop(Iop_ShrN32x4
,
11441 mkexpr(t1
), mkU8(16)),
11442 binop(Iop_ShrN32x4
,
11443 binop(Iop_ShlN32x4
,
11444 mkexpr(t2
), mkU8(16)),
11446 putWReg(wd
, mkexpr(t3
));
11450 case 0x03: { /* HADD_U.D */
11451 DIP("HADD_U.D w%d, w%d, w%d", wd
, ws
, wt
);
11452 t1
= newTemp(Ity_V128
);
11453 t2
= newTemp(Ity_V128
);
11454 t3
= newTemp(Ity_V128
);
11455 assign(t1
, getWReg(ws
));
11456 assign(t2
, getWReg(wt
));
11459 binop(Iop_ShrN64x2
,
11460 mkexpr(t1
), mkU8(32)),
11461 binop(Iop_ShrN64x2
,
11462 binop(Iop_ShlN64x2
,
11463 mkexpr(t2
), mkU8(32)),
11465 putWReg(wd
, mkexpr(t3
));
11476 case 0x06: { /* HSUB_S.df */
11478 case 0x01: { /* HSUB_S.H */
11479 DIP("HSUB_S.H w%d, w%d, w%d", wd
, ws
, wt
);
11480 t1
= newTemp(Ity_V128
);
11481 t2
= newTemp(Ity_V128
);
11482 t3
= newTemp(Ity_V128
);
11483 assign(t1
, getWReg(ws
));
11484 assign(t2
, getWReg(wt
));
11487 binop(Iop_SarN16x8
,
11488 mkexpr(t1
), mkU8(8)),
11489 binop(Iop_SarN16x8
,
11490 binop(Iop_ShlN16x8
,
11491 mkexpr(t2
), mkU8(8)),
11493 putWReg(wd
, mkexpr(t3
));
11497 case 0x02: { /* HSUB_S.W */
11498 DIP("HSUB_S.W w%d, w%d, w%d", wd
, ws
, wt
);
11499 t1
= newTemp(Ity_V128
);
11500 t2
= newTemp(Ity_V128
);
11501 t3
= newTemp(Ity_V128
);
11502 assign(t1
, getWReg(ws
));
11503 assign(t2
, getWReg(wt
));
11506 binop(Iop_SarN32x4
,
11507 mkexpr(t1
), mkU8(16)),
11508 binop(Iop_SarN32x4
,
11509 binop(Iop_ShlN32x4
,
11510 mkexpr(t2
), mkU8(16)),
11512 putWReg(wd
, mkexpr(t3
));
11516 case 0x03: { /* HSUB_S.D */
11517 DIP("HSUB_S.D w%d, w%d, w%d", wd
, ws
, wt
);
11518 t1
= newTemp(Ity_V128
);
11519 t2
= newTemp(Ity_V128
);
11520 t3
= newTemp(Ity_V128
);
11521 assign(t1
, getWReg(ws
));
11522 assign(t2
, getWReg(wt
));
11525 binop(Iop_SarN64x2
,
11526 mkexpr(t1
), mkU8(32)),
11527 binop(Iop_SarN64x2
,
11528 binop(Iop_ShlN64x2
,
11529 mkexpr(t2
), mkU8(32)),
11531 putWReg(wd
, mkexpr(t3
));
11542 case 0x07: { /* HSUB_U.df */
11544 case 0x01: { /* HSUB_U.H */
11545 DIP("HSUB_U.H w%d, w%d, w%d", wd
, ws
, wt
);
11546 t1
= newTemp(Ity_V128
);
11547 t2
= newTemp(Ity_V128
);
11548 t3
= newTemp(Ity_V128
);
11549 assign(t1
, getWReg(ws
));
11550 assign(t2
, getWReg(wt
));
11553 binop(Iop_ShrN16x8
,
11554 mkexpr(t1
), mkU8(8)),
11555 binop(Iop_ShrN16x8
,
11556 binop(Iop_ShlN16x8
,
11557 mkexpr(t2
), mkU8(8)),
11559 putWReg(wd
, mkexpr(t3
));
11563 case 0x02: { /* HSUB_U.W */
11564 DIP("HSUB_U.W w%d, w%d, w%d", wd
, ws
, wt
);
11565 t1
= newTemp(Ity_V128
);
11566 t2
= newTemp(Ity_V128
);
11567 t3
= newTemp(Ity_V128
);
11568 assign(t1
, getWReg(ws
));
11569 assign(t2
, getWReg(wt
));
11572 binop(Iop_ShrN32x4
,
11573 mkexpr(t1
), mkU8(16)),
11574 binop(Iop_ShrN32x4
,
11575 binop(Iop_ShlN32x4
,
11576 mkexpr(t2
), mkU8(16)),
11578 putWReg(wd
, mkexpr(t3
));
11582 case 0x03: { /* HSUB_U.D */
11583 DIP("HSUB_U.D w%d, w%d, w%d", wd
, ws
, wt
);
11584 t1
= newTemp(Ity_V128
);
11585 t2
= newTemp(Ity_V128
);
11586 t3
= newTemp(Ity_V128
);
11587 assign(t1
, getWReg(ws
));
11588 assign(t2
, getWReg(wt
));
11591 binop(Iop_ShrN64x2
,
11592 mkexpr(t1
), mkU8(32)),
11593 binop(Iop_ShrN64x2
,
11594 binop(Iop_ShlN64x2
,
11595 mkexpr(t2
), mkU8(32)),
11597 putWReg(wd
, mkexpr(t3
));
11615 static Int
msa_3R_1A(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1A) */
11620 operation
= (cins
& 0x03C00000) >> 22;
11621 df
= (cins
& 0x00200000) >> 21;
11622 wt
= (cins
& 0x001F0000) >> 16;
11624 switch (operation
) {
11625 case 0x00: { /* FCAF.df */
11627 case 0x00: { /* FCAF.W */
11628 DIP("FCAF.W w%d, w%d, w%d", wd
, ws
, wt
);
11629 calculateMSACSR(ws
, wt
, FCAFW
, 2);
11630 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(0ul), mkU64(0ul)));
11634 case 0x01: { /* FCAF.D */
11635 DIP("FCAF.D w%d, w%d, w%d", wd
, ws
, wt
);
11636 calculateMSACSR(ws
, wt
, FCAFD
, 2);
11637 putWReg(wd
, binop(Iop_64HLtoV128
, mkU64(0ul), mkU64(0ul)));
11648 case 0x01: { /* FCUN.df */
11650 case 0x00: { /* FCUN.W */
11651 DIP("FCUN.W w%d, w%d, w%d", wd
, ws
, wt
);
11652 calculateMSACSR(ws
, wt
, FCUNW
, 2);
11653 putWReg(wd
, binop(Iop_CmpUN32Fx4
,
11659 case 0x01: { /* FCUN.D */
11660 DIP("FCUN.D w%d, w%d, w%d", wd
, ws
, wt
);
11661 calculateMSACSR(ws
, wt
, FCUND
, 2);
11662 putWReg(wd
, binop(Iop_CmpUN64Fx2
,
11675 case 0x02: { /* FCEQ.df */
11677 case 0x00: { /* FCEQ.W */
11678 DIP("FCEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11679 calculateMSACSR(ws
, wt
, FCEQW
, 2);
11680 putWReg(wd
, binop(Iop_CmpEQ32Fx4
,
11686 case 0x01: { /* FCEQ.D */
11687 DIP("FCEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11688 calculateMSACSR(ws
, wt
, FCEQD
, 2);
11689 putWReg(wd
, binop(Iop_CmpEQ64Fx2
,
11702 case 0x03: { /* FCUEQ.df */
11704 case 0x00: { /* FCUEQ.W */
11705 DIP("FCUEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11706 calculateMSACSR(ws
, wt
, FCUEQW
, 2);
11709 binop(Iop_CmpEQ32Fx4
,
11712 binop(Iop_CmpUN32Fx4
,
11718 case 0x01: { /* FCUEQ.D */
11719 DIP("FCUEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11720 calculateMSACSR(ws
, wt
, FCUEQD
, 2);
11723 binop(Iop_CmpEQ64Fx2
,
11726 binop(Iop_CmpUN64Fx2
,
11739 case 0x04: { /* FCLT.df */
11741 case 0x00: { /* FCLT.W */
11742 DIP("FCLT.W w%d, w%d, w%d", wd
, ws
, wt
);
11743 calculateMSACSR(ws
, wt
, FCLTW
, 2);
11745 binop(Iop_CmpLT32Fx4
,
11751 case 0x01: { /* FCLT.D */
11752 DIP("FCLT.D w%d, w%d, w%d", wd
, ws
, wt
);
11753 calculateMSACSR(ws
, wt
, FCLTD
, 2);
11755 binop(Iop_CmpLT64Fx2
,
11768 case 0x05: { /* FCULT.df */
11770 case 0x00: { /* FCULT.W */
11771 DIP("FCULT.W w%d, w%d, w%d", wd
, ws
, wt
);
11772 calculateMSACSR(ws
, wt
, FCULTW
, 2);
11775 binop(Iop_CmpLT32Fx4
,
11778 binop(Iop_CmpUN32Fx4
,
11784 case 0x01: { /* FCULT.D */
11785 DIP("FCULT.D w%d, w%d, w%d", wd
, ws
, wt
);
11786 calculateMSACSR(ws
, wt
, FCULTD
, 2);
11789 binop(Iop_CmpLT64Fx2
,
11792 binop(Iop_CmpUN64Fx2
,
11805 case 0x06: { /* FCLE.df */
11807 case 0x00: { /* FCLE.W */
11808 DIP("FCLE.W w%d, w%d, w%d", wd
, ws
, wt
);
11809 calculateMSACSR(ws
, wt
, FCLEW
, 2);
11811 binop(Iop_CmpLE32Fx4
,
11817 case 0x01: { /* FCLE.D */
11818 DIP("FCLE.D w%d, w%d, w%d", wd
, ws
, wt
);
11819 calculateMSACSR(ws
, wt
, FCLED
, 2);
11821 binop(Iop_CmpLE64Fx2
,
11834 case 0x07: { /* FCULE.df */
11836 case 0x00: { /* FCULE.W */
11837 DIP("FCULE.W w%d, w%d, w%d", wd
, ws
, wt
);
11838 calculateMSACSR(ws
, wt
, FCULEW
, 2);
11841 binop(Iop_CmpLE32Fx4
,
11844 binop(Iop_CmpUN32Fx4
,
11850 case 0x01: { /* FCULE.D */
11851 DIP("FCULE.D w%d, w%d, w%d", wd
, ws
, wt
);
11852 calculateMSACSR(ws
, wt
, FCULED
, 2);
11855 binop(Iop_CmpLE64Fx2
,
11858 binop(Iop_CmpUN64Fx2
,
11871 case 0x08: { /* FSAF.df */
11873 case 0x00: { /* FSAF.W */
11874 DIP("FSAF.W w%d, w%d, w%d", wd
, ws
, wt
);
11875 calculateMSACSR(ws
, wt
, FSAFW
, 2);
11877 binop(Iop_64HLtoV128
,
11878 mkU64(0ul), mkU64(0ul)));
11882 case 0x01: { /* FSAF.D */
11883 DIP("FSAF.D w%d, w%d, w%d", wd
, ws
, wt
);
11884 calculateMSACSR(ws
, wt
, FSAFD
, 2);
11886 binop(Iop_64HLtoV128
,
11887 mkU64(0ul), mkU64(0ul)));
11898 case 0x09: { /* FSUN.df */
11900 case 0x00: { /* FSUN.W */
11901 DIP("FSUN.W w%d, w%d, w%d", wd
, ws
, wt
);
11902 calculateMSACSR(ws
, wt
, FSUNW
, 2);
11904 binop(Iop_CmpUN32Fx4
,
11910 case 0x01: { /* FSUN.D */
11911 DIP("FSUN.D w%d, w%d, w%d", wd
, ws
, wt
);
11912 calculateMSACSR(ws
, wt
, FSUND
, 2);
11914 binop(Iop_CmpUN64Fx2
,
11927 case 0x0A: { /* FSEQ.df */
11929 case 0x00: { /* FSEQ.W */
11930 DIP("FSEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11931 calculateMSACSR(ws
, wt
, FSEQW
, 2);
11933 binop(Iop_CmpEQ32Fx4
,
11939 case 0x01: { /* FSEQ.D */
11940 DIP("FSEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11941 calculateMSACSR(ws
, wt
, FSEQD
, 2);
11943 binop(Iop_CmpEQ64Fx2
,
11956 case 0x0B: { /* FSUEQ.df */
11958 case 0x00: { /* FSUEQ.W */
11959 DIP("FSUEQ.W w%d, w%d, w%d", wd
, ws
, wt
);
11960 calculateMSACSR(ws
, wt
, FSUEQW
, 2);
11963 binop(Iop_CmpEQ32Fx4
,
11966 binop(Iop_CmpUN32Fx4
,
11972 case 0x01: { /* FSUEQ.D */
11973 DIP("FSUEQ.D w%d, w%d, w%d", wd
, ws
, wt
);
11974 calculateMSACSR(ws
, wt
, FSUEQD
, 2);
11977 binop(Iop_CmpEQ64Fx2
,
11980 binop(Iop_CmpUN64Fx2
,
11993 case 0x0C: { /* FSLT.df */
11995 case 0x00: { /* FSLT.W */
11996 DIP("FSLT.W w%d, w%d, w%d", wd
, ws
, wt
);
11997 calculateMSACSR(ws
, wt
, FSLTW
, 2);
11999 binop(Iop_CmpLT32Fx4
,
12005 case 0x01: { /* FSLT.D */
12006 DIP("FSLT.D w%d, w%d, w%d", wd
, ws
, wt
);
12007 calculateMSACSR(ws
, wt
, FSLTD
, 2);
12009 binop(Iop_CmpLT64Fx2
,
12022 case 0x0D: { /* FSULT.df */
12024 case 0x00: { /* FSULT.W */
12025 DIP("FSULT.W w%d, w%d, w%d", wd
, ws
, wt
);
12026 calculateMSACSR(ws
, wt
, FSULTW
, 2);
12029 binop(Iop_CmpLT32Fx4
,
12032 binop(Iop_CmpUN32Fx4
,
12038 case 0x01: { /* FSULT.D */
12039 DIP("FSULT.D w%d, w%d, w%d", wd
, ws
, wt
);
12040 calculateMSACSR(ws
, wt
, FSULTD
, 2);
12043 binop(Iop_CmpLT64Fx2
,
12046 binop(Iop_CmpUN64Fx2
,
12059 case 0x0E: { /* FSLE.df */
12061 case 0x00: { /* FSLE.W */
12062 DIP("FSLE.W w%d, w%d, w%d", wd
, ws
, wt
);
12063 calculateMSACSR(ws
, wt
, FSLEW
, 2);
12065 binop(Iop_CmpLE32Fx4
,
12071 case 0x01: { /* FSLE.D */
12072 DIP("FSLE.D w%d, w%d, w%d", wd
, ws
, wt
);
12073 calculateMSACSR(ws
, wt
, FSLED
, 2);
12075 binop(Iop_CmpLE64Fx2
,
12088 case 0x0F: { /* FSULE.df */
12090 case 0x00: { /* FSULE.W */
12091 DIP("FSULE.W w%d, w%d, w%d", wd
, ws
, wt
);
12092 calculateMSACSR(ws
, wt
, FSULEW
, 2);
12095 binop(Iop_CmpLE32Fx4
,
12098 binop(Iop_CmpUN32Fx4
,
12104 case 0x01: { /* FSULE.D */
12105 DIP("FSULE.D w%d, w%d, w%d", wd
, ws
, wt
);
12106 calculateMSACSR(ws
, wt
, FSULED
, 2);
12109 binop(Iop_CmpLE64Fx2
,
12112 binop(Iop_CmpUN64Fx2
,
12132 static Int
msa_3R_1B(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1B) */
12134 IRTemp t1
, t2
, t3
, t4
;
12138 operation
= (cins
& 0x03C00000) >> 22;
12139 df
= (cins
& 0x00200000) >> 21;
12140 wt
= (cins
& 0x001F0000) >> 16;
12142 switch (operation
) {
12143 case 0x00: { /* FADD.df */
12145 case 0x00: { /* FADD.W */
12146 DIP("FADD.W w%d, w%d, w%d", wd
, ws
, wt
);
12147 calculateMSACSR(ws
, wt
, FADDW
, 2);
12148 IRExpr
*rm
= get_IR_roundingmode_MSA();
12150 triop(Iop_Add32Fx4
, rm
,
12156 case 0x01: { /* FADD.D */
12157 DIP("FADD.D w%d, w%d, w%d", wd
, ws
, wt
);
12158 calculateMSACSR(ws
, wt
, FADDD
, 2);
12159 IRExpr
*rm
= get_IR_roundingmode_MSA();
12161 triop(Iop_Add64Fx2
, rm
,
12174 case 0x01: { /* FSUB.df */
12176 case 0x00: { /* FSUB.W */
12177 DIP("FSUB.W w%d, w%d, w%d", wd
, ws
, wt
);
12178 calculateMSACSR(ws
, wt
, FSUBW
, 2);
12179 IRExpr
*rm
= get_IR_roundingmode_MSA();
12181 triop(Iop_Sub32Fx4
, rm
,
12187 case 0x01: { /* FSUB.D */
12188 DIP("FSUB.D w%d, w%d, w%d", wd
, ws
, wt
);
12189 calculateMSACSR(ws
, wt
, FSUBD
, 2);
12190 IRExpr
*rm
= get_IR_roundingmode_MSA();
12192 triop(Iop_Sub64Fx2
, rm
,
12205 case 0x02: { /* FMUL.df */
12207 case 0x00: { /* FMUL.W */
12208 DIP("FMUL.W w%d, w%d, w%d", wd
, ws
, wt
);
12209 calculateMSACSR(ws
, wt
, FMULW
, 2);
12210 IRExpr
*rm
= get_IR_roundingmode_MSA();
12212 triop(Iop_Mul32Fx4
, rm
,
12218 case 0x01: { /* FMUL.D */
12219 DIP("FMUL.D w%d, w%d, w%d", wd
, ws
, wt
);
12220 calculateMSACSR(ws
, wt
, FMULW
, 2);
12221 IRExpr
*rm
= get_IR_roundingmode_MSA();
12223 triop(Iop_Mul64Fx2
, rm
,
12236 case 0x03: { /* FDIV.df */
12238 case 0x00: { /* FDIV.W */
12239 DIP("FDIV.W w%d, w%d, w%d", wd
, ws
, wt
);
12240 calculateMSACSR(ws
, wt
, FDIVW
, 2);
12241 IRExpr
*rm
= get_IR_roundingmode_MSA();
12243 triop(Iop_Div32Fx4
, rm
,
12249 case 0x01: { /* FDIV.D */
12250 DIP("FDIV.D w%d, w%d, w%d", wd
, ws
, wt
);
12251 calculateMSACSR(ws
, wt
, FDIVD
, 2);
12252 IRExpr
*rm
= get_IR_roundingmode_MSA();
12254 triop(Iop_Div64Fx2
, rm
,
12267 case 0x04: { /* FMADD.df */
12269 case 0x00: { /* FMADD.W */
12270 DIP("FMADD.W w%d, w%d, w%d", wd
, ws
, wt
);
12271 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12272 IRExpr
*rm
= get_IR_roundingmode_MSA();
12276 for (i
= 0; i
< 4; i
++) {
12277 tmp
[i
] = newTemp(Ity_F32
);
12279 qop(Iop_MAddF32
, rm
,
12280 unop(Iop_ReinterpI32asF32
,
12281 binop(Iop_GetElem32x4
,
12284 unop(Iop_ReinterpI32asF32
,
12285 binop(Iop_GetElem32x4
,
12288 unop(Iop_ReinterpI32asF32
,
12289 binop(Iop_GetElem32x4
,
12295 binop(Iop_64HLtoV128
,
12296 binop(Iop_32HLto64
,
12297 unop(Iop_ReinterpF32asI32
,
12299 unop(Iop_ReinterpF32asI32
,
12301 binop(Iop_32HLto64
,
12302 unop(Iop_ReinterpF32asI32
,
12304 unop(Iop_ReinterpF32asI32
,
12305 mkexpr(tmp
[0])))));
12309 case 0x01: { /* FMADD.D */
12310 DIP("FMADD.D w%d, w%d, w%d", wd
, ws
, wt
);
12311 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12312 IRExpr
*rm
= get_IR_roundingmode_MSA();
12316 for (i
= 0; i
< 2; i
++) {
12317 tmp
[i
] = newTemp(Ity_F64
);
12319 qop(Iop_MAddF64
, rm
,
12320 unop(Iop_ReinterpI64asF64
,
12321 binop(Iop_GetElem64x2
,
12324 unop(Iop_ReinterpI64asF64
,
12325 binop(Iop_GetElem64x2
,
12328 unop(Iop_ReinterpI64asF64
,
12329 binop(Iop_GetElem64x2
,
12335 binop(Iop_64HLtoV128
,
12336 unop(Iop_ReinterpF64asI64
,
12338 unop(Iop_ReinterpF64asI64
,
12350 case 0x05: { /* FMSUB.df */
12352 case 0x00: { /* FMSUB.W */
12353 DIP("FMSUB.W w%d, w%d, w%d", wd
, ws
, wt
);
12354 calculateMSACSR(ws
, wt
, FMADDW
, 2);
12355 IRExpr
*rm
= get_IR_roundingmode_MSA();
12359 for (i
= 0; i
< 4; i
++) {
12360 tmp
[i
] = newTemp(Ity_F32
);
12362 qop(Iop_MSubF32
, rm
,
12363 unop(Iop_ReinterpI32asF32
,
12364 binop(Iop_GetElem32x4
,
12367 unop(Iop_ReinterpI32asF32
,
12368 binop(Iop_GetElem32x4
,
12371 unop(Iop_ReinterpI32asF32
,
12372 binop(Iop_GetElem32x4
,
12378 binop(Iop_64HLtoV128
,
12379 binop(Iop_32HLto64
,
12380 unop(Iop_ReinterpF32asI32
,
12382 unop(Iop_ReinterpF32asI32
,
12384 binop(Iop_32HLto64
,
12385 unop(Iop_ReinterpF32asI32
,
12387 unop(Iop_ReinterpF32asI32
,
12388 mkexpr(tmp
[0])))));
12392 case 0x01: { /* FMSUB.D */
12393 DIP("FMSUB.D w%d, w%d, w%d", wd
, ws
, wt
);
12394 calculateMSACSR(ws
, wt
, FMADDD
, 2);
12395 IRExpr
*rm
= get_IR_roundingmode_MSA();
12399 for (i
= 0; i
< 2; i
++) {
12400 tmp
[i
] = newTemp(Ity_F64
);
12402 qop(Iop_MSubF64
, rm
,
12403 unop(Iop_ReinterpI64asF64
,
12404 binop(Iop_GetElem64x2
,
12407 unop(Iop_ReinterpI64asF64
,
12408 binop(Iop_GetElem64x2
,
12411 unop(Iop_ReinterpI64asF64
,
12412 binop(Iop_GetElem64x2
,
12418 binop(Iop_64HLtoV128
,
12419 unop(Iop_ReinterpF64asI64
,
12421 unop(Iop_ReinterpF64asI64
,
12433 case 0x07: { /* FEXP2.df */
12435 case 0x00: { /* FEXP2.W */
12436 DIP("FEXP2.W w%d, w%d, w%d", wd
, ws
, wt
);
12437 calculateMSACSR(ws
, wt
, FEXP2W
, 2);
12438 IRExpr
*rm
= get_IR_roundingmode_MSA();
12440 triop(Iop_Scale2_32Fx4
, rm
,
12446 case 0x01: { /* FEXP2.D */
12447 DIP("FEXP2.D w%d, w%d, w%d", wd
, ws
, wt
);
12448 calculateMSACSR(ws
, wt
, FEXP2D
, 2);
12449 IRExpr
*rm
= get_IR_roundingmode_MSA();
12451 triop(Iop_Scale2_64Fx2
, rm
,
12464 case 0x08: { /* FEXDO.df */
12466 case 0x00: { /* FEXDO.H */
12467 DIP("FEXDO.H w%d, w%d, w%d", wd
, ws
, wt
);
12468 calculateMSACSR(ws
, wt
, FEXDOH
, 2);
12469 t1
= newTemp(Ity_I64
);
12470 t2
= newTemp(Ity_I64
);
12472 unop(Iop_F32toF16x4_DEP
,
12475 unop(Iop_F32toF16x4_DEP
,
12478 binop(Iop_64HLtoV128
,
12479 mkexpr(t1
), mkexpr(t2
)));
12483 case 0x01: { /* FEXDO.W */
12484 DIP("FEXDO.W w%d, w%d, w%d", wd
, ws
, wt
);
12485 calculateMSACSR(ws
, wt
, FEXDOW
, 2);
12486 t1
= newTemp(Ity_I32
);
12487 t2
= newTemp(Ity_I32
);
12488 t3
= newTemp(Ity_I32
);
12489 t4
= newTemp(Ity_I32
);
12490 IRExpr
*rm
= get_IR_roundingmode_MSA();
12492 unop(Iop_ReinterpF32asI32
,
12493 binop(Iop_F64toF32
, rm
,
12494 unop(Iop_ReinterpI64asF64
,
12498 unop(Iop_ReinterpF32asI32
,
12499 binop(Iop_F64toF32
, rm
,
12500 unop(Iop_ReinterpI64asF64
,
12501 unop(Iop_V128HIto64
,
12504 unop(Iop_ReinterpF32asI32
,
12505 binop(Iop_F64toF32
, rm
,
12506 unop(Iop_ReinterpI64asF64
,
12510 unop(Iop_ReinterpF32asI32
,
12511 binop(Iop_F64toF32
, rm
,
12512 unop(Iop_ReinterpI64asF64
,
12513 unop(Iop_V128HIto64
,
12516 binop(Iop_64HLtoV128
,
12517 binop(Iop_32HLto64
,
12518 mkexpr(t2
), mkexpr(t1
)),
12519 binop(Iop_32HLto64
,
12520 mkexpr(t4
), mkexpr(t3
))));
12531 case 0x0A: { /* FTQ.df */
12533 case 0x00: { /* FTQ.H */
12534 DIP("FTQ.H w%d, w%d, w%d", wd
, ws
, wt
);
12535 calculateMSACSR(ws
, wt
, FTQH
, 2);
12536 IRExpr
*rm
= get_IR_roundingmode_MSA();
12538 triop(Iop_F32x4_2toQ16x8
, rm
,
12544 case 0x01: { /* FTQ.W */
12545 DIP("FTQ.W w%d, w%d, w%d", wd
, ws
, wt
);
12546 calculateMSACSR(ws
, wt
, FTQW
, 2);
12547 IRExpr
*rm
= get_IR_roundingmode_MSA();
12549 triop(Iop_F64x2_2toQ32x4
, rm
,
12562 case 0x0C: { /* FMIN.df */
12564 case 0x00: { /* FMIN.W */
12565 DIP("FMIN.W w%d, w%d, w%d", wd
, ws
, wt
);
12566 calculateMSACSR(ws
, wt
, FMINW
, 2);
12568 binop(Iop_Min32Fx4
,
12574 case 0x01: { /* FMIN.D */
12575 DIP("FMIN.D w%d, w%d, w%d", wd
, ws
, wt
);
12576 calculateMSACSR(ws
, wt
, FMINW
, 2);
12578 binop(Iop_Min64Fx2
,
12591 case 0x0D: { /* FMIN_A.df */
12593 case 0x00: { /* FMIN_A.W */
12594 DIP("FMIN_A.W w%d, w%d, w%d", wd
, ws
, wt
);
12595 calculateMSACSR(ws
, wt
, FMINAW
, 2);
12596 t1
= newTemp(Ity_V128
);
12597 t2
= newTemp(Ity_V128
);
12598 t3
= newTemp(Ity_V128
);
12599 t4
= newTemp(Ity_V128
);
12603 binop(Iop_64HLtoV128
,
12604 mkU64(0x7FFFFFFF7FFFFFFF),
12605 mkU64(0x7FFFFFFF7FFFFFFF))));
12609 binop(Iop_64HLtoV128
,
12610 mkU64(0x7FFFFFFF7FFFFFFF),
12611 mkU64(0x7FFFFFFF7FFFFFFF))));
12613 binop(Iop_Min32Fx4
,
12614 mkexpr(t2
), mkexpr(t1
)));
12619 binop(Iop_CmpUN32Fx4
,
12624 binop(Iop_CmpEQ32Fx4
,
12633 binop(Iop_CmpUN32Fx4
,
12636 binop(Iop_CmpLT32Fx4
,
12642 binop(Iop_CmpUN32Fx4
,
12645 binop(Iop_CmpLT32Fx4
,
12649 binop(Iop_64HLtoV128
,
12650 mkU64(0x8000000080000000),
12651 mkU64(0x8000000080000000))));
12654 mkexpr(t3
), mkexpr(t4
)));
12658 case 0x01: { /* FMIN_A.D */
12659 DIP("FMIN_A.D w%d, w%d, w%d", wd
, ws
, wt
);
12660 calculateMSACSR(ws
, wt
, FMINAD
, 2);
12661 t1
= newTemp(Ity_V128
);
12662 t2
= newTemp(Ity_V128
);
12663 t3
= newTemp(Ity_V128
);
12664 t4
= newTemp(Ity_V128
);
12668 binop(Iop_64HLtoV128
,
12669 mkU64(0x7FFFFFFFFFFFFFFF),
12670 mkU64(0x7FFFFFFFFFFFFFFF))));
12674 binop(Iop_64HLtoV128
,
12675 mkU64(0x7FFFFFFFFFFFFFFF),
12676 mkU64(0x7FFFFFFFFFFFFFFF))));
12678 binop(Iop_Min64Fx2
,
12679 mkexpr(t2
), mkexpr(t1
)));
12684 binop(Iop_CmpUN64Fx2
,
12689 binop(Iop_CmpEQ64Fx2
,
12698 binop(Iop_CmpUN64Fx2
,
12701 binop(Iop_CmpLT64Fx2
,
12707 binop(Iop_CmpUN64Fx2
,
12710 binop(Iop_CmpLT64Fx2
,
12714 binop(Iop_64HLtoV128
,
12715 mkU64(0x8000000000000000),
12716 mkU64(0x8000000000000000))));
12719 mkexpr(t3
), mkexpr(t4
)));
12730 case 0x0E: { /* FMAX.df */
12732 case 0x00: { /* FMAX.W */
12733 DIP("FMAX.W w%d, w%d, w%d", wd
, ws
, wt
);
12734 calculateMSACSR(ws
, wt
, FMAXW
, 2);
12736 binop(Iop_Max32Fx4
,
12742 case 0x01: { /* FMAX.D */
12743 DIP("FMAX.D w%d, w%d, w%d", wd
, ws
, wt
);
12744 calculateMSACSR(ws
, wt
, FMAXW
, 2);
12746 binop(Iop_Max64Fx2
,
12759 case 0x0F: { /* FMAX_A.df */
12761 case 0x00: { /* FMAX_A.W */
12762 DIP("FMAX_A.W w%d, w%d, w%d", wd
, ws
, wt
);
12763 calculateMSACSR(ws
, wt
, FMAXAW
, 2);
12764 t1
= newTemp(Ity_V128
);
12765 t2
= newTemp(Ity_V128
);
12766 t3
= newTemp(Ity_V128
);
12767 t4
= newTemp(Ity_V128
);
12771 binop(Iop_64HLtoV128
,
12772 mkU64(0x7FFFFFFF7FFFFFFF),
12773 mkU64(0x7FFFFFFF7FFFFFFF))));
12777 binop(Iop_64HLtoV128
,
12778 mkU64(0x7FFFFFFF7FFFFFFF),
12779 mkU64(0x7FFFFFFF7FFFFFFF))));
12781 binop(Iop_Max32Fx4
,
12782 mkexpr(t2
), mkexpr(t1
)));
12787 binop(Iop_CmpUN32Fx4
,
12792 binop(Iop_CmpEQ32Fx4
,
12801 binop(Iop_CmpUN32Fx4
,
12804 binop(Iop_CmpLT32Fx4
,
12810 binop(Iop_CmpUN32Fx4
,
12813 binop(Iop_CmpLT32Fx4
,
12817 binop(Iop_64HLtoV128
,
12818 mkU64(0x8000000080000000),
12819 mkU64(0x8000000080000000))));
12822 mkexpr(t3
), mkexpr(t4
)));
12826 case 0x01: { /* FMAX_A.D */
12827 DIP("FMAX_A.D w%d, w%d, w%d", wd
, ws
, wt
);
12828 calculateMSACSR(ws
, wt
, FMAXAD
, 2);
12829 t1
= newTemp(Ity_V128
);
12830 t2
= newTemp(Ity_V128
);
12831 t3
= newTemp(Ity_V128
);
12832 t4
= newTemp(Ity_V128
);
12836 binop(Iop_64HLtoV128
,
12837 mkU64(0x7FFFFFFFFFFFFFFF),
12838 mkU64(0x7FFFFFFFFFFFFFFF))));
12842 binop(Iop_64HLtoV128
,
12843 mkU64(0x7FFFFFFFFFFFFFFF),
12844 mkU64(0x7FFFFFFFFFFFFFFF))));
12846 binop(Iop_Max64Fx2
,
12847 mkexpr(t2
), mkexpr(t1
)));
12852 binop(Iop_CmpUN64Fx2
,
12857 binop(Iop_CmpEQ64Fx2
,
12866 binop(Iop_CmpUN64Fx2
,
12869 binop(Iop_CmpLT64Fx2
,
12875 binop(Iop_CmpUN64Fx2
,
12878 binop(Iop_CmpLT64Fx2
,
12882 binop(Iop_64HLtoV128
,
12883 mkU64(0x8000000000000000),
12884 mkU64(0x8000000000000000))));
12887 mkexpr(t3
), mkexpr(t4
)));
12905 static Int
msa_3R_1C(UInt cins
, UChar wd
, UChar ws
) /* 3R (0x1C) */
12907 IRTemp t1
, t2
, t3
, t4
, t5
, t6
;
12911 operation
= (cins
& 0x03C00000) >> 22;
12912 df
= (cins
& 0x00200000) >> 21;
12913 wt
= (cins
& 0x001F0000) >> 16;
12915 switch (operation
) {
12916 case 0x01: { /* FCOR.df */
12918 case 0x00: { /* FCOR.W */
12919 DIP("FCOR.W w%d, w%d, w%d", wd
, ws
, wt
);
12920 calculateMSACSR(ws
, wt
, FCORW
, 2);
12923 binop(Iop_CmpUN32Fx4
,
12929 case 0x01: { /* FCOR.D */
12930 DIP("FCOR.D w%d, w%d, w%d", wd
, ws
, wt
);
12931 calculateMSACSR(ws
, wt
, FCORD
, 2);
12934 binop(Iop_CmpUN64Fx2
,
12947 case 0x02: { /* FCUNE.df */
12949 case 0x00: { /* FCUNE.W */
12950 DIP("FCUNE.W w%d, w%d, w%d", wd
, ws
, wt
);
12951 calculateMSACSR(ws
, wt
, FCUNEW
, 2);
12954 binop(Iop_CmpEQ32Fx4
,
12960 case 0x01: { /* FCUNE.D */
12961 DIP("FCUNE.D w%d, w%d, w%d", wd
, ws
, wt
);
12962 calculateMSACSR(ws
, wt
, FCUNED
, 2);
12965 binop(Iop_CmpEQ64Fx2
,
12978 case 0x03: { /* FCNE.df */
12980 case 0x00: { /* FCNE.W */
12981 DIP("FCNE.W w%d, w%d, w%d", wd
, ws
, wt
);
12982 calculateMSACSR(ws
, wt
, FCNEW
, 2);
12986 binop(Iop_CmpEQ32Fx4
,
12989 binop(Iop_CmpUN32Fx4
,
12995 case 0x01: { /* FCNE.D */
12996 DIP("FCNE.D w%d, w%d, w%d", wd
, ws
, wt
);
12997 calculateMSACSR(ws
, wt
, FCNED
, 2);
13001 binop(Iop_CmpEQ64Fx2
,
13004 binop(Iop_CmpUN64Fx2
,
13017 case 0x04: { /* MUL_Q.df */
13019 case 0x00: { /* MUL_Q.H */
13020 DIP("MUL_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13021 t1
= newTemp(Ity_V128
);
13022 t2
= newTemp(Ity_V128
);
13023 t3
= newTemp(Ity_V128
);
13024 assign(t1
, getWReg(ws
));
13025 assign(t2
, getWReg(wt
));
13027 binop(Iop_QDMulHi16Sx8
,
13028 mkexpr(t1
), mkexpr(t2
)));
13029 putWReg(wd
, mkexpr(t3
));
13033 case 0x01: { /* MUL_Q.W */
13034 DIP("MUL_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13035 t1
= newTemp(Ity_V128
);
13036 t2
= newTemp(Ity_V128
);
13037 t3
= newTemp(Ity_V128
);
13038 assign(t1
, getWReg(ws
));
13039 assign(t2
, getWReg(wt
));
13041 binop(Iop_QDMulHi32Sx4
,
13042 mkexpr(t1
), mkexpr(t2
)));
13043 putWReg(wd
, mkexpr(t3
));
13054 case 0x05: { /* MADD_Q.df */
13056 case 0x00: { /* MADD_Q.W */
13057 DIP("MADD_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13058 t1
= newTemp(Ity_V128
);
13059 t2
= newTemp(Ity_V128
);
13060 t3
= newTemp(Ity_V128
);
13061 t4
= newTemp(Ity_V128
);
13062 t5
= newTemp(Ity_V128
);
13063 t6
= newTemp(Ity_V128
);
13065 binop(Iop_SarN32x4
,
13066 binop(Iop_InterleaveEvenLanes16x8
,
13071 binop(Iop_SarN32x4
,
13072 getWReg(ws
), mkU8(16)));
13074 binop(Iop_SarN32x4
,
13075 binop(Iop_InterleaveEvenLanes16x8
,
13080 binop(Iop_SarN32x4
,
13081 getWReg(wt
), mkU8(16)));
13084 binop(Iop_ShlN32x4
,
13085 binop(Iop_SarN32x4
,
13086 binop(Iop_InterleaveEvenLanes16x8
,
13092 mkexpr(t1
), mkexpr(t3
))));
13095 binop(Iop_ShlN32x4
,
13096 binop(Iop_SarN32x4
,
13101 mkexpr(t2
), mkexpr(t4
))));
13103 binop(Iop_InterleaveEvenLanes16x8
,
13104 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13105 mkexpr(t6
), mkU8(15)),
13106 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13107 mkexpr(t5
), mkU8(15))));
13111 case 0x01: { /* MADD_Q.W */
13112 DIP("MADD_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13113 t1
= newTemp(Ity_V128
);
13114 t2
= newTemp(Ity_V128
);
13115 t3
= newTemp(Ity_V128
);
13116 t4
= newTemp(Ity_V128
);
13117 t5
= newTemp(Ity_V128
);
13118 t6
= newTemp(Ity_V128
);
13120 binop(Iop_SarN64x2
,
13121 binop(Iop_InterleaveEvenLanes32x4
,
13126 binop(Iop_SarN64x2
,
13127 getWReg(ws
), mkU8(32)));
13129 binop(Iop_SarN64x2
,
13130 binop(Iop_InterleaveEvenLanes32x4
,
13135 binop(Iop_SarN64x2
,
13136 getWReg(wt
), mkU8(32)));
13139 binop(Iop_ShlN64x2
,
13140 binop(Iop_SarN64x2
,
13141 binop(Iop_InterleaveEvenLanes32x4
,
13146 binop(Iop_64HLtoV128
,
13148 unop(Iop_V128HIto64
,
13150 unop(Iop_V128HIto64
,
13159 binop(Iop_ShlN64x2
,
13160 binop(Iop_SarN64x2
,
13164 binop(Iop_64HLtoV128
,
13166 unop(Iop_V128HIto64
,
13168 unop(Iop_V128HIto64
,
13176 binop(Iop_InterleaveEvenLanes32x4
,
13177 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13178 mkexpr(t6
), mkU8(31)),
13179 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13180 mkexpr(t5
), mkU8(31))));
13191 case 0x06: { /* MSUB_Q.df */
13193 case 0x00: { /* MSUB_Q.H */
13194 DIP("MSUB_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13195 t1
= newTemp(Ity_V128
);
13196 t2
= newTemp(Ity_V128
);
13197 t3
= newTemp(Ity_V128
);
13198 t4
= newTemp(Ity_V128
);
13199 t5
= newTemp(Ity_V128
);
13200 t6
= newTemp(Ity_V128
);
13202 binop(Iop_SarN32x4
,
13203 binop(Iop_InterleaveEvenLanes16x8
,
13208 binop(Iop_SarN32x4
,
13209 getWReg(ws
), mkU8(16)));
13211 binop(Iop_SarN32x4
,
13212 binop(Iop_InterleaveEvenLanes16x8
,
13217 binop(Iop_SarN32x4
,
13218 getWReg(wt
), mkU8(16)));
13221 binop(Iop_ShlN32x4
,
13222 binop(Iop_SarN32x4
,
13223 binop(Iop_InterleaveEvenLanes16x8
,
13229 mkexpr(t1
), mkexpr(t3
))));
13232 binop(Iop_ShlN32x4
,
13233 binop(Iop_SarN32x4
,
13238 mkexpr(t2
), mkexpr(t4
))));
13240 binop(Iop_InterleaveEvenLanes16x8
,
13241 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13242 mkexpr(t6
), mkU8(15)),
13243 binop(Iop_QandQSarNnarrow32Sto16Sx4
,
13244 mkexpr(t5
), mkU8(15))));
13248 case 0x01: { /* MSUB_Q.W */
13249 DIP("MSUB_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13250 t1
= newTemp(Ity_V128
);
13251 t2
= newTemp(Ity_V128
);
13252 t3
= newTemp(Ity_V128
);
13253 t4
= newTemp(Ity_V128
);
13254 t5
= newTemp(Ity_V128
);
13255 t6
= newTemp(Ity_V128
);
13257 binop(Iop_SarN64x2
,
13258 binop(Iop_InterleaveEvenLanes32x4
,
13263 binop(Iop_SarN64x2
,
13264 getWReg(ws
), mkU8(32)));
13266 binop(Iop_SarN64x2
,
13267 binop(Iop_InterleaveEvenLanes32x4
,
13272 binop(Iop_SarN64x2
,
13273 getWReg(wt
), mkU8(32)));
13276 binop(Iop_ShlN64x2
,
13277 binop(Iop_SarN64x2
,
13278 binop(Iop_InterleaveEvenLanes32x4
,
13283 binop(Iop_64HLtoV128
,
13285 unop(Iop_V128HIto64
,
13287 unop(Iop_V128HIto64
,
13296 binop(Iop_ShlN64x2
,
13297 binop(Iop_SarN64x2
,
13301 binop(Iop_64HLtoV128
,
13303 unop(Iop_V128HIto64
,
13305 unop(Iop_V128HIto64
,
13313 binop(Iop_InterleaveEvenLanes32x4
,
13314 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13315 mkexpr(t6
), mkU8(31)),
13316 binop(Iop_QandQSarNnarrow64Sto32Sx2
,
13317 mkexpr(t5
), mkU8(31))));
13328 case 0x09: { /* FSOR.df */
13330 case 0x00: { /* FSOR.W */
13331 DIP("FSOR.W w%d, w%d, w%d", wd
, ws
, wt
);
13332 calculateMSACSR(ws
, wt
, FSORW
, 2);
13335 binop(Iop_CmpUN32Fx4
,
13341 case 0x01: { /* FSOR.D */
13342 DIP("FSOR.D w%d, w%d, w%d", wd
, ws
, wt
);
13343 calculateMSACSR(ws
, wt
, FSORD
, 2);
13346 binop(Iop_CmpUN64Fx2
,
13359 case 0x0A: { /* FSUNE.df */
13361 case 0x00: { /* FSUNE.W */
13362 DIP("FSUNE.W w%d, w%d, w%d", wd
, ws
, wt
);
13363 calculateMSACSR(ws
, wt
, FSUNEW
, 2);
13366 binop(Iop_CmpEQ32Fx4
,
13372 case 0x01: { /* FSUNE.D */
13373 DIP("FSUNE.D w%d, w%d, w%d", wd
, ws
, wt
);
13374 calculateMSACSR(ws
, wt
, FSUNED
, 2);
13377 binop(Iop_CmpEQ64Fx2
,
13390 case 0x0B: { /* FSNE.df */
13392 case 0x00: { /* FSNE.W */
13393 DIP("FSNE.W w%d, w%d, w%d", wd
, ws
, wt
);
13394 calculateMSACSR(ws
, wt
, FSNEW
, 2);
13398 binop(Iop_CmpEQ32Fx4
,
13401 binop(Iop_CmpUN32Fx4
,
13407 case 0x01: { /* FSNE.D */
13408 DIP("FSNE.D w%d, w%d, w%d", wd
, ws
, wt
);
13409 calculateMSACSR(ws
, wt
, FSNED
, 2);
13413 binop(Iop_CmpEQ64Fx2
,
13416 binop(Iop_CmpUN64Fx2
,
13429 case 0x0C: { /* MULR_Q.df */
13431 case 0x00: { /* MULR_Q.H */
13432 DIP("MULR_Q.H w%d, w%d, w%d", wd
, ws
, wt
);
13433 t1
= newTemp(Ity_V128
);
13434 t2
= newTemp(Ity_V128
);
13435 t3
= newTemp(Ity_V128
);
13436 assign(t1
, getWReg(ws
));
13437 assign(t2
, getWReg(wt
));
13438 assign(t3
, binop(Iop_QRDMulHi16Sx8
,
13439 mkexpr(t1
), mkexpr(t2
)));
13440 putWReg(wd
, mkexpr(t3
));
13444 case 0x01: { /* MULR_Q.W */
13445 DIP("MULR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13446 t1
= newTemp(Ity_V128
);
13447 t2
= newTemp(Ity_V128
);
13448 t3
= newTemp(Ity_V128
);
13449 assign(t1
, getWReg(ws
));
13450 assign(t2
, getWReg(wt
));
13451 assign(t3
, binop(Iop_QRDMulHi32Sx4
,
13452 mkexpr(t1
), mkexpr(t2
)));
13453 putWReg(wd
, mkexpr(t3
));
13464 case 0x0D: { /* MADDR_Q.df */
13466 case 0x00: { /* MADDR_Q.W */
13467 DIP("MADDR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13468 t1
= newTemp(Ity_V128
);
13469 t2
= newTemp(Ity_V128
);
13470 t3
= newTemp(Ity_V128
);
13471 t4
= newTemp(Ity_V128
);
13472 t5
= newTemp(Ity_V128
);
13473 t6
= newTemp(Ity_V128
);
13475 binop(Iop_SarN32x4
,
13476 binop(Iop_InterleaveEvenLanes16x8
,
13481 binop(Iop_SarN32x4
,
13482 getWReg(ws
), mkU8(16)));
13484 binop(Iop_SarN32x4
,
13485 binop(Iop_InterleaveEvenLanes16x8
,
13490 binop(Iop_SarN32x4
,
13491 getWReg(wt
), mkU8(16)));
13494 binop(Iop_ShlN32x4
,
13495 binop(Iop_SarN32x4
,
13496 binop(Iop_InterleaveEvenLanes16x8
,
13502 mkexpr(t1
), mkexpr(t3
))));
13505 binop(Iop_ShlN32x4
,
13506 binop(Iop_SarN32x4
,
13511 mkexpr(t2
), mkexpr(t4
))));
13513 binop(Iop_InterleaveEvenLanes16x8
,
13514 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13515 mkexpr(t6
), mkU8(15)),
13516 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13517 mkexpr(t5
), mkU8(15))));
13521 case 0x01: { /* MADDR_Q.D */
13522 DIP("MADDR_Q.D w%d, w%d, w%d", wd
, ws
, wt
);
13523 t1
= newTemp(Ity_V128
);
13524 t2
= newTemp(Ity_V128
);
13525 t3
= newTemp(Ity_V128
);
13526 t4
= newTemp(Ity_V128
);
13527 t5
= newTemp(Ity_V128
);
13528 t6
= newTemp(Ity_V128
);
13530 binop(Iop_SarN64x2
,
13531 binop(Iop_InterleaveEvenLanes32x4
,
13536 binop(Iop_SarN64x2
,
13537 getWReg(ws
), mkU8(32)));
13539 binop(Iop_SarN64x2
,
13540 binop(Iop_InterleaveEvenLanes32x4
,
13545 binop(Iop_SarN64x2
,
13546 getWReg(wt
), mkU8(32)));
13549 binop(Iop_ShlN64x2
,
13550 binop(Iop_SarN64x2
,
13551 binop(Iop_InterleaveEvenLanes32x4
,
13556 binop(Iop_64HLtoV128
,
13558 unop(Iop_V128HIto64
,
13560 unop(Iop_V128HIto64
,
13569 binop(Iop_ShlN64x2
,
13570 binop(Iop_SarN64x2
,
13574 binop(Iop_64HLtoV128
,
13576 unop(Iop_V128HIto64
,
13578 unop(Iop_V128HIto64
,
13586 binop(Iop_InterleaveEvenLanes32x4
,
13587 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13588 mkexpr(t6
), mkU8(31)),
13589 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13590 mkexpr(t5
), mkU8(31))));
13601 case 0x0E: { /* MSUBR_Q.df */
13603 case 0x00: { /* MSUBR_Q.W */
13604 DIP("MSUBR_Q.W w%d, w%d, w%d", wd
, ws
, wt
);
13605 t1
= newTemp(Ity_V128
);
13606 t2
= newTemp(Ity_V128
);
13607 t3
= newTemp(Ity_V128
);
13608 t4
= newTemp(Ity_V128
);
13609 t5
= newTemp(Ity_V128
);
13610 t6
= newTemp(Ity_V128
);
13612 binop(Iop_SarN32x4
,
13613 binop(Iop_InterleaveEvenLanes16x8
,
13618 binop(Iop_SarN32x4
,
13619 getWReg(ws
), mkU8(16)));
13621 binop(Iop_SarN32x4
,
13622 binop(Iop_InterleaveEvenLanes16x8
,
13627 binop(Iop_SarN32x4
,
13628 getWReg(wt
), mkU8(16)));
13631 binop(Iop_ShlN32x4
,
13632 binop(Iop_SarN32x4
,
13633 binop(Iop_InterleaveEvenLanes16x8
,
13639 mkexpr(t1
), mkexpr(t3
))));
13642 binop(Iop_ShlN32x4
,
13643 binop(Iop_SarN32x4
,
13648 mkexpr(t2
), mkexpr(t4
))));
13650 binop(Iop_InterleaveEvenLanes16x8
,
13651 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13652 mkexpr(t6
), mkU8(15)),
13653 binop(Iop_QandQRSarNnarrow32Sto16Sx4
,
13654 mkexpr(t5
), mkU8(15))));
13658 case 0x01: { /* MSUBR_Q.D */
13659 DIP("MSUBR_Q.D w%d, w%d, w%d", wd
, ws
, wt
);
13660 t1
= newTemp(Ity_V128
);
13661 t2
= newTemp(Ity_V128
);
13662 t3
= newTemp(Ity_V128
);
13663 t4
= newTemp(Ity_V128
);
13664 t5
= newTemp(Ity_V128
);
13665 t6
= newTemp(Ity_V128
);
13667 binop(Iop_SarN64x2
,
13668 binop(Iop_InterleaveEvenLanes32x4
,
13673 binop(Iop_SarN64x2
,
13674 getWReg(ws
), mkU8(32)));
13676 binop(Iop_SarN64x2
,
13677 binop(Iop_InterleaveEvenLanes32x4
,
13682 binop(Iop_SarN64x2
,
13683 getWReg(wt
), mkU8(32)));
13686 binop(Iop_ShlN64x2
,
13687 binop(Iop_SarN64x2
,
13688 binop(Iop_InterleaveEvenLanes32x4
,
13693 binop(Iop_64HLtoV128
,
13695 unop(Iop_V128HIto64
,
13697 unop(Iop_V128HIto64
,
13706 binop(Iop_ShlN64x2
,
13707 binop(Iop_SarN64x2
,
13711 binop(Iop_64HLtoV128
,
13713 unop(Iop_V128HIto64
,
13715 unop(Iop_V128HIto64
,
13723 binop(Iop_InterleaveEvenLanes32x4
,
13724 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13725 mkexpr(t6
), mkU8(31)),
13726 binop(Iop_QandQRSarNnarrow64Sto32Sx2
,
13727 mkexpr(t5
), mkU8(31))));
13745 static Int
msa_ELM(UInt cins
, UChar wd
, UChar ws
) /* ELM (0x19) */
13747 IRTemp t1
, t2
, t3
, t4
, t5
;
13752 operation
= (cins
& 0x03C00000) >> 22;
13753 ty
= mode64
? Ity_I64
: Ity_I32
;
13755 switch ((cins
& 0x03FF0000) >> 16) {
13756 case 0x07E: /* CFCMSA */
13757 DIP("CFCMSA r%d, c%d", wd
, ws
);
13760 case 0: { /* MSAIR */
13762 t1
= newTemp(Ity_I32
);
13763 /* IRExpr_BBPTR() =>
13764 Need to pass pointer to
13765 guest state to helper. */
13766 d
= unsafeIRDirty_1_N(t1
, 0,
13767 "mips_dirtyhelper_get_MSAIR",
13768 &mips_dirtyhelper_get_MSAIR
,
13770 /* d->nFxState = 0; */
13771 stmt(IRStmt_Dirty(d
));
13773 mkWidenFrom32(ty
, mkexpr(t1
), True
));
13777 case 1: /* MSACSR */
13779 mkWidenFrom32(ty
, getMSACSR(), True
));
13784 mkWidenFrom32(ty
, mkU32(0), False
));
13790 case 0x03E: /* CTCMSA */
13791 DIP("CTCMSA r%d, c%d", ws
, wd
);
13793 if (wd
== 1) { /* MSACSR */
13795 binop(Iop_And32
, mkNarrowTo32(ty
, getIReg(ws
)),
13796 mkU32(0x1FFFFFF)));
13801 case 0x0BE: /* MOVE.V */
13802 DIP("MOVE.V w%d, w%d", ws
, wd
);
13803 putWReg(wd
, getWReg(ws
));
13807 df
= (cins
& 0x003F0000) >> 16;
13809 if ((df
& 0x38) == 0x38) { // 11100n; dw
13812 } else if ((df
& 0x30) == 0x30) { // 1100nn; w
13815 } else if ((df
& 0x20) == 0x20) { // 100nnn; hw
13818 } else if ((df
& 0x00) == 0x00) { // 00nnnn; b
13823 switch (operation
) {
13824 case 0x00: /* SLDI.df */
13826 case 0x00: /* SLDI.B */
13827 DIP("SLDI.B w%d, w%d[%d]", wd
, ws
, n
);
13828 t1
= newTemp(Ity_V128
);
13829 t2
= newTemp(Ity_V128
);
13838 (16 - n
) << 3 : 0)));
13840 binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
13843 case 0x20: /* SLDI.H */
13844 DIP("SLDI.H w%d, w%d[%d]", wd
, ws
, n
);
13847 putWReg(wd
, getWReg(ws
));
13849 t1
= newTemp(Ity_V128
);
13850 t2
= newTemp(Ity_V128
);
13852 binop(Iop_ShrN64x2
,
13856 binop(Iop_ShlN64x2
,
13858 mkU8((8 - n
) << 3)));
13867 case 0x30: /* SLDI.W */
13868 DIP("SLDI.W w%d, w%d[%d]", wd
, ws
, n
);
13871 putWReg(wd
, getWReg(ws
));
13873 t1
= newTemp(Ity_V128
);
13874 t2
= newTemp(Ity_V128
);
13876 binop(Iop_ShrN32x4
,
13880 binop(Iop_ShlN32x4
,
13882 mkU8((4 - n
) << 3)));
13891 case 0x38: /* SLDI.D */
13892 DIP("SLDI.D w%d, w%d[%d]", wd
, ws
, n
);
13895 putWReg(wd
, getWReg(ws
));
13897 t1
= newTemp(Ity_V128
);
13898 t2
= newTemp(Ity_V128
);
13900 binop(Iop_ShrN16x8
,
13904 binop(Iop_ShlN16x8
,
13906 mkU8((2 - n
) << 3)));
13921 case 0x01: /* SPLATI.df */
13923 case 0x00: { /* SPLATI.B */
13924 DIP("SPLATI.B w%d, w%d[%d]", wd
, ws
, n
);
13925 t1
= newTemp(Ity_V128
);
13926 t2
= newTemp(Ity_V128
);
13927 t3
= newTemp(Ity_V128
);
13928 t4
= newTemp(Ity_V128
);
13932 binop(Iop_InterleaveOddLanes8x16
,
13937 binop(Iop_InterleaveEvenLanes8x16
,
13945 binop(Iop_InterleaveOddLanes16x8
,
13946 mkexpr(t1
), mkexpr(t1
)));
13949 binop(Iop_InterleaveEvenLanes16x8
,
13950 mkexpr(t1
), mkexpr(t1
)));
13956 binop(Iop_InterleaveOddLanes32x4
,
13957 mkexpr(t2
), mkexpr(t2
)));
13960 binop(Iop_InterleaveEvenLanes32x4
,
13961 mkexpr(t2
), mkexpr(t2
)));
13967 binop(Iop_InterleaveHI64x2
,
13968 mkexpr(t3
), mkexpr(t3
)));
13971 binop(Iop_InterleaveLO64x2
,
13972 mkexpr(t3
), mkexpr(t3
)));
13974 putWReg(wd
, mkexpr(t4
));
13978 case 0x20: { /* SPLATI.H */
13979 DIP("SPLATI.H w%d, w%d[%d]", wd
, ws
, n
);
13980 t1
= newTemp(Ity_V128
);
13981 t2
= newTemp(Ity_V128
);
13982 t3
= newTemp(Ity_V128
);
13986 binop(Iop_InterleaveOddLanes16x8
,
13991 binop(Iop_InterleaveEvenLanes16x8
,
13999 binop(Iop_InterleaveOddLanes32x4
,
14000 mkexpr(t1
), mkexpr(t1
)));
14003 binop(Iop_InterleaveEvenLanes32x4
,
14004 mkexpr(t1
), mkexpr(t1
)));
14010 binop(Iop_InterleaveHI64x2
,
14011 mkexpr(t2
), mkexpr(t2
)));
14014 binop(Iop_InterleaveLO64x2
,
14015 mkexpr(t2
), mkexpr(t2
)));
14017 putWReg(wd
, mkexpr(t3
));
14021 case 0x30: { /* SPLATI.W */
14022 DIP("SPLATI.W w%d, w%d[%d]", wd
, ws
, n
);
14023 t1
= newTemp(Ity_V128
);
14024 t2
= newTemp(Ity_V128
);
14025 t3
= newTemp(Ity_V128
);
14026 assign(t1
, getWReg(ws
));
14030 binop(Iop_InterleaveOddLanes32x4
,
14031 mkexpr(t1
), mkexpr(t1
)));
14034 binop(Iop_InterleaveEvenLanes32x4
,
14035 mkexpr(t1
), mkexpr(t1
)));
14041 binop(Iop_InterleaveHI64x2
,
14042 mkexpr(t2
), mkexpr(t2
)));
14045 binop(Iop_InterleaveLO64x2
,
14046 mkexpr(t2
), mkexpr(t2
)));
14048 putWReg(wd
, mkexpr(t3
));
14052 case 0x38: /* SPLATI.D */
14053 DIP("SPLATI.D w%d, w%d[%d]", wd
, ws
, n
);
14054 t1
= newTemp(Ity_V128
);
14055 t3
= newTemp(Ity_V128
);
14056 assign(t1
, getWReg(ws
));
14060 binop(Iop_InterleaveHI64x2
,
14061 mkexpr(t1
), mkexpr(t1
)));
14064 binop(Iop_InterleaveLO64x2
,
14065 mkexpr(t1
), mkexpr(t1
)));
14067 putWReg(wd
, mkexpr(t3
));
14076 case 0x02: /* COPY_S.df */
14078 case 0x00: /* COPY_S.B */
14079 DIP("COPY_S.B r%d, w%d[%d]", wd
, ws
, n
);
14080 t1
= newTemp(Ity_I8
);
14157 unop(Iop_V128HIto64
,
14166 unop(Iop_V128HIto64
,
14175 unop(Iop_V128HIto64
,
14184 unop(Iop_V128HIto64
,
14193 unop(Iop_V128HIto64
,
14202 unop(Iop_V128HIto64
,
14211 unop(Iop_V128HIto64
,
14220 unop(Iop_V128HIto64
,
14226 unop(mode64
? Iop_8Sto64
: Iop_8Sto32
,
14230 case 0x20: /* COPY_S.H */
14231 DIP("COPY_S.H r%d, w%d[%d]", wd
, ws
, n
);
14232 t1
= newTemp(Ity_I16
);
14271 unop(Iop_V128HIto64
,
14279 unop(Iop_V128HIto64
,
14287 unop(Iop_V128HIto64
,
14295 unop(Iop_V128HIto64
,
14301 unop(mode64
? Iop_16Sto64
: Iop_16Sto32
,
14305 case 0x30: /* COPY_S.W */
14306 DIP("COPY_S.W r%d, w%d[%d]", wd
, ws
, n
);
14318 t2
= newTemp(Ity_I64
);
14320 unop(Iop_V128to64
, getWReg(ws
)));
14329 t2
= newTemp(Ity_I64
);
14331 unop(Iop_V128HIto64
,
14341 t2
= newTemp(Ity_I64
);
14343 unop(Iop_V128HIto64
,
14358 case 0x38: /* COPY_S.D */
14360 DIP("COPY_S.D r%d, w%d[%d]", wd
, ws
, n
);
14371 unop(Iop_V128HIto64
,
14387 case 0x03: { /* COPY_U.df */
14389 case 0x00: /* COPY_U.B */
14390 DIP("COPY_U.B r%d, w%d[%d]", wd
, ws
, n
);
14391 t1
= newTemp(Ity_I8
);
14471 unop(Iop_V128HIto64
,
14480 unop(Iop_V128HIto64
,
14489 unop(Iop_V128HIto64
,
14498 unop(Iop_V128HIto64
,
14507 unop(Iop_V128HIto64
,
14516 unop(Iop_V128HIto64
,
14525 unop(Iop_V128HIto64
,
14534 unop(Iop_V128HIto64
,
14540 unop(mode64
? Iop_8Uto64
: Iop_8Uto32
,
14544 case 0x20: /* COPY_U.H */
14545 DIP("COPY_U.H r%d, w%d[%d]", wd
, ws
, n
);
14546 t1
= newTemp(Ity_I16
);
14585 unop(Iop_V128HIto64
,
14593 unop(Iop_V128HIto64
,
14601 unop(Iop_V128HIto64
,
14609 unop(Iop_V128HIto64
,
14615 unop(mode64
? Iop_16Uto64
: Iop_16Uto32
,
14619 case 0x30: /* COPY_U.W */
14620 DIP("COPY_U.W r%d, w%d[%d]", wd
, ws
, n
);
14632 t2
= newTemp(Ity_I64
);
14644 t2
= newTemp(Ity_I64
);
14646 unop(Iop_V128HIto64
,
14656 t2
= newTemp(Ity_I64
);
14658 unop(Iop_V128HIto64
,
14680 case 0x04: { /* INSERT.df */
14681 t5
= newTemp(Ity_I64
);
14685 assign(t5
, mode64
? getIReg(ws
) :
14686 unop(Iop_32Uto64
, getIReg(ws
)));
14688 if (df
== 0x38) { /* INSERT.D */
14690 DIP("INSERT.D w%d[%d], r%d", wd
, n
, ws
);
14694 binop(Iop_64HLtoV128
,
14695 unop(Iop_V128HIto64
,
14700 binop(Iop_64HLtoV128
,
14711 t1
= newTemp(Ity_I64
);
14712 t2
= newTemp(Ity_I64
);
14713 assign(t1
, unop(Iop_V128to64
, getWReg(wd
)));
14714 assign(t2
, unop(Iop_V128HIto64
, getWReg(wd
)));
14718 case 0x00: /* INSERT.B */
14719 DIP("INSERT.B w%d[%d], r%d", wd
, n
, ws
);
14731 case 0x20: /* INSERT.H */
14732 DIP("INSERT.H w%d[%d], r%d", wd
, n
, ws
);
14744 case 0x30: /* INSERT.W */
14745 DIP("INSERT.W w%d[%d], r%d", wd
, n
, ws
);
14754 mask
= 0xFFFFFFFFull
;
14762 t4
= newTemp(Ity_I64
);
14767 t3
= newTemp(Ity_I64
);
14776 binop(Iop_And64
, mkexpr(*src
), mkU64(~mask
)),
14778 binop(Iop_Shl64
, mkexpr(t5
), mkU8(n
)),
14781 binop(Iop_64HLtoV128
, mkexpr(t4
), mkexpr(t3
)));
14785 case 0x05: { /* INSVE.df */
14787 case 0x00: { /* INSVE.B */
14788 DIP("INSVE.B w%d[%d], w%d[0]", wd
, n
, ws
);
14789 t1
= newTemp(Ity_V128
);
14790 t2
= newTemp(Ity_V128
);
14791 assign(t1
, getWReg(wd
));
14792 assign(t2
, getWReg(ws
));
14796 for (i
= 0; i
< 16; i
++) {
14797 tmp
[i
] = newTemp(Ity_I8
);
14801 binop(Iop_GetElem8x16
,
14802 mkexpr(t2
), mkU8(0x0)));
14805 binop(Iop_GetElem8x16
,
14806 mkexpr(t1
), mkU8(i
)));
14810 binop(Iop_64HLtoV128
,
14811 binop(Iop_32HLto64
,
14812 binop(Iop_16HLto32
,
14819 binop(Iop_16HLto32
,
14826 binop(Iop_32HLto64
,
14827 binop(Iop_16HLto32
,
14834 binop(Iop_16HLto32
,
14840 mkexpr(tmp
[0]))))));
14844 case 0x20: { /* INSVE.H */
14845 DIP("INSVE.H w%d[%d], r%d[0]", wd
, n
, ws
);
14846 t1
= newTemp(Ity_V128
);
14847 t2
= newTemp(Ity_V128
);
14848 assign(t1
, getWReg(wd
));
14849 assign(t2
, getWReg(ws
));
14853 for (i
= 0; i
< 8; i
++) {
14854 tmp
[i
] = newTemp(Ity_I16
);
14858 binop(Iop_GetElem16x8
,
14859 mkexpr(t2
), mkU8(0x0)));
14862 binop(Iop_GetElem16x8
,
14863 mkexpr(t1
), mkU8(i
)));
14867 binop(Iop_64HLtoV128
,
14868 binop(Iop_32HLto64
,
14869 binop(Iop_16HLto32
,
14872 binop(Iop_16HLto32
,
14875 binop(Iop_32HLto64
,
14876 binop(Iop_16HLto32
,
14879 binop(Iop_16HLto32
,
14881 mkexpr(tmp
[0])))));
14885 case 0x30: { /* INSVE.W */
14886 DIP("INSVE.W w%d[%d], r%d[0]", wd
, n
, ws
);
14887 t1
= newTemp(Ity_V128
);
14888 t2
= newTemp(Ity_V128
);
14889 assign(t1
, getWReg(wd
));
14890 assign(t2
, getWReg(ws
));
14894 for (i
= 0; i
< 4; i
++) {
14895 tmp
[i
] = newTemp(Ity_I32
);
14899 binop(Iop_GetElem32x4
,
14900 mkexpr(t2
), mkU8(0x0)));
14903 binop(Iop_GetElem32x4
,
14904 mkexpr(t1
), mkU8(i
)));
14908 binop(Iop_64HLtoV128
,
14909 binop(Iop_32HLto64
,
14912 binop(Iop_32HLto64
,
14918 case 0x38: { /* INSVE.D */
14919 DIP("INSVE.D w%d[%d], r%d[0]", wd
, n
, ws
);
14920 t1
= newTemp(Ity_V128
);
14921 t2
= newTemp(Ity_V128
);
14922 assign(t1
, getWReg(wd
));
14923 assign(t2
, getWReg(ws
));
14927 for (i
= 0; i
< 2; i
++) {
14928 tmp
[i
] = newTemp(Ity_I64
);
14932 binop(Iop_GetElem64x2
,
14933 mkexpr(t2
), mkU8(0x0)));
14936 binop(Iop_GetElem64x2
,
14937 mkexpr(t1
), mkU8(i
)));
14941 binop(Iop_64HLtoV128
,
14942 mkexpr(tmp
[1]), mkexpr(tmp
[0])));
14958 static Int
msa_VEC(UInt cins
, UChar wd
, UChar ws
) /* VEC */
14964 vassert((cins
& 0x03000000) == 0);
14966 operation
= (cins
& 0x03E00000) >> 21;
14967 wt
= (cins
& 0x001F0000) >> 16;
14969 switch (operation
) {
14970 case 0x00: { /* AND.V */
14971 DIP("AND.V w%d, w%d, w%d", wd
, ws
, wt
);
14972 t1
= newTemp(Ity_V128
);
14973 t2
= newTemp(Ity_V128
);
14974 t3
= newTemp(Ity_V128
);
14975 assign(t1
, getWReg(ws
));
14976 assign(t2
, getWReg(wt
));
14977 assign(t3
, binop(Iop_AndV128
, mkexpr(t1
), mkexpr(t2
)));
14978 putWReg(wd
, mkexpr(t3
));
14982 case 0x01: { /* OR.V */
14983 DIP("OR.V w%d, w%d, w%d", wd
, ws
, wt
);
14984 t1
= newTemp(Ity_V128
);
14985 t2
= newTemp(Ity_V128
);
14986 t3
= newTemp(Ity_V128
);
14987 assign(t1
, getWReg(ws
));
14988 assign(t2
, getWReg(wt
));
14989 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
14990 putWReg(wd
, mkexpr(t3
));
14994 case 0x02: { /* NOR.V */
14995 DIP("NOR.V w%d, w%d, w%d", wd
, ws
, wt
);
14996 t1
= newTemp(Ity_V128
);
14997 t2
= newTemp(Ity_V128
);
14998 t3
= newTemp(Ity_V128
);
14999 assign(t1
, getWReg(ws
));
15000 assign(t2
, getWReg(wt
));
15003 binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
))));
15004 putWReg(wd
, mkexpr(t3
));
15008 case 0x03: { /* XOR.V */
15009 DIP("XOR.V w%d, w%d, w%d", wd
, ws
, wt
);
15010 t1
= newTemp(Ity_V128
);
15011 t2
= newTemp(Ity_V128
);
15012 t3
= newTemp(Ity_V128
);
15013 assign(t1
, getWReg(ws
));
15014 assign(t2
, getWReg(wt
));
15015 assign(t3
, binop(Iop_XorV128
, mkexpr(t1
), mkexpr(t2
)));
15016 putWReg(wd
, mkexpr(t3
));
15020 case 0x04: { /* BMNZ (ws AND wt) OR (wd AND NOT wt) */
15021 DIP("BMNZ.V w%d, w%d, w%d", wd
, ws
, wt
);
15022 t1
= newTemp(Ity_V128
);
15023 t2
= newTemp(Ity_V128
);
15024 t3
= newTemp(Ity_V128
);
15027 getWReg(ws
), getWReg(wt
)));
15031 unop(Iop_NotV128
, getWReg(wt
))));
15032 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15033 putWReg(wd
, mkexpr(t3
));
15037 case 0x05: { /* BMZ.V (ws AND NOT wt) OR (wd AND wt) */
15038 DIP("BMZ.V w%d, w%d, w%d", wd
, ws
, wt
);
15039 t1
= newTemp(Ity_V128
);
15040 t2
= newTemp(Ity_V128
);
15041 t3
= newTemp(Ity_V128
);
15044 getWReg(wd
), getWReg(wt
)));
15048 unop(Iop_NotV128
, getWReg(wt
))));
15049 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15050 putWReg(wd
, mkexpr(t3
));
15054 case 0x06: { /* BSEL (ws AND NOT wd) OR (wt AND wd) */
15055 DIP("BSEL.V w%d, w%d, w%d", wd
, ws
, wt
);
15056 t1
= newTemp(Ity_V128
);
15057 t2
= newTemp(Ity_V128
);
15058 t3
= newTemp(Ity_V128
);
15061 getWReg(wd
), getWReg(wt
)));
15065 unop(Iop_NotV128
, getWReg(wd
))));
15066 assign(t3
, binop(Iop_OrV128
, mkexpr(t1
), mkexpr(t2
)));
15067 putWReg(wd
, mkexpr(t3
));
15078 static Int
msa_2R(UInt cins
, UChar wd
, UChar ws
) /* 2R */
15080 IRTemp t1
, t2
, t3
, t4
;
15085 vassert((cins
& 0x00200000) == 0);
15087 operation
= (cins
& 0x03FC0000) >> 18;
15088 df
= (cins
& 0x00030000) >> 16;
15089 ty
= mode64
? Ity_I64
: Ity_I32
;
15091 switch (operation
) {
15092 case 0xC0: { /* FILL.df */
15093 t1
= newTemp(Ity_I64
);
15096 case 0x00: /* FILL.B */
15097 DIP("FILL.B w%d, r%d", wd
, ws
);
15098 t2
= newTemp(Ity_I32
);
15099 t3
= newTemp(Ity_I16
);
15100 t4
= newTemp(Ity_I8
);
15101 assign(t4
, mkNarrowTo8(ty
, getIReg(ws
)));
15103 binop(Iop_8HLto16
, mkexpr(t4
), mkexpr(t4
)));
15105 binop(Iop_16HLto32
, mkexpr(t3
), mkexpr(t3
)));
15107 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15110 case 0x01: /* FILL.H */
15111 DIP("FILL.H w%d, r%d", wd
, ws
);
15112 t2
= newTemp(Ity_I32
);
15113 t3
= newTemp(Ity_I16
);
15114 assign(t3
, mkNarrowTo16(ty
, getIReg(ws
)));
15116 binop(Iop_16HLto32
, mkexpr(t3
), mkexpr(t3
)));
15118 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15121 case 0x02: /* FILL.W */
15122 DIP("FILL.W w%d, r%d", wd
, ws
);
15123 t2
= newTemp(Ity_I32
);
15124 assign(t2
, mkNarrowTo32(ty
, getIReg(ws
)));
15126 binop(Iop_32HLto64
, mkexpr(t2
), mkexpr(t2
)));
15129 case 0x03: /* FILL.D */
15131 DIP("FILL.W w%d, r%d", wd
, ws
);
15132 t2
= newTemp(Ity_I32
);
15133 assign(t1
, getIReg(ws
));
15145 binop(Iop_64HLtoV128
, mkexpr(t1
), mkexpr(t1
)));
15149 case 0xC1: { /* PCNT.df */
15151 case 0x00: /* PCNT.B */
15152 DIP("PCNT.B w%d, r%d", wd
, ws
);
15154 unop(Iop_Cnt8x16
, getWReg(ws
)));
15157 case 0x01: /* PCNT.H */
15158 DIP("PCNT.H w%d, r%d", wd
, ws
);
15159 t1
= newTemp(Ity_V128
);
15160 t2
= newTemp(Ity_V128
);
15161 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15166 binop(Iop_64HLtoV128
,
15167 mkU64(0x00FF00FF00FF00FFULL
),
15168 mkU64(0x00FF00FF00FF00FFULL
))),
15170 binop(Iop_ShrN16x8
,
15171 mkexpr(t1
), mkU8(8)),
15172 binop(Iop_64HLtoV128
,
15173 mkU64(0x00FF00FF00FF00FFULL
),
15174 mkU64(0x00FF00FF00FF00FFULL
)))));
15175 putWReg(wd
, mkexpr(t2
));
15178 case 0x02: /* PCNT.W */
15179 DIP("PCNT.W w%d, r%d", wd
, ws
);
15180 t1
= newTemp(Ity_V128
);
15181 t2
= newTemp(Ity_V128
);
15182 t3
= newTemp(Ity_V128
);
15183 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15188 binop(Iop_64HLtoV128
,
15189 mkU64(0x00FF00FF00FF00FFULL
),
15190 mkU64(0x00FF00FF00FF00FFULL
))),
15192 binop(Iop_ShrN32x4
,
15193 mkexpr(t1
), mkU8(8)),
15194 binop(Iop_64HLtoV128
,
15195 mkU64(0x00FF00FF00FF00FFULL
),
15196 mkU64(0x00FF00FF00FF00FFULL
)))));
15201 binop(Iop_64HLtoV128
,
15202 mkU64(0x0000FFFF0000FFFFULL
),
15203 mkU64(0x0000FFFF0000FFFFULL
))),
15205 binop(Iop_ShrN32x4
,
15206 mkexpr(t2
), mkU8(16)),
15207 binop(Iop_64HLtoV128
,
15208 mkU64(0x0000FFFF0000FFFFULL
),
15209 mkU64(0x0000FFFF0000FFFFULL
)))));
15210 putWReg(wd
, mkexpr(t3
));
15213 case 0x03: /* PCNT.D */
15214 DIP("PCNT.D w%d, r%d", wd
, ws
);
15215 t1
= newTemp(Ity_V128
);
15216 t2
= newTemp(Ity_V128
);
15217 t3
= newTemp(Ity_V128
);
15218 t4
= newTemp(Ity_V128
);;
15219 assign(t1
, unop(Iop_Cnt8x16
, getWReg(ws
)));
15224 binop(Iop_64HLtoV128
,
15225 mkU64(0x00FF00FF00FF00FFULL
),
15226 mkU64(0x00FF00FF00FF00FFULL
))),
15228 binop(Iop_ShrN64x2
,
15229 mkexpr(t1
), mkU8(8)),
15230 binop(Iop_64HLtoV128
,
15231 mkU64(0x00FF00FF00FF00FFULL
),
15232 mkU64(0x00FF00FF00FF00FFULL
)))));
15237 binop(Iop_64HLtoV128
,
15238 mkU64(0x0000FFFF0000FFFFULL
),
15239 mkU64(0x0000FFFF0000FFFFULL
))),
15241 binop(Iop_ShrN64x2
,
15242 mkexpr(t2
), mkU8(16)),
15243 binop(Iop_64HLtoV128
,
15244 mkU64(0x0000FFFF0000FFFFULL
),
15245 mkU64(0x0000FFFF0000FFFFULL
)))));
15250 binop(Iop_64HLtoV128
,
15251 mkU64(0x00000000FFFFFFFFULL
),
15252 mkU64(0x00000000FFFFFFFFULL
))),
15254 binop(Iop_ShrN64x2
,
15255 mkexpr(t3
), mkU8(32)),
15256 binop(Iop_64HLtoV128
,
15257 mkU64(0x00000000FFFFFFFFULL
),
15258 mkU64(0x00000000FFFFFFFFULL
)))));
15259 putWReg(wd
, mkexpr(t4
));
15269 case 0xC2: { /* NLOC.df */
15271 case 0x00: /* NLOC.B */
15272 DIP("NLOC.B w%d, w%d", wd
, ws
);
15274 unop(Iop_Cls8x16
, getWReg(ws
)));
15277 case 0x01: /* NLOC.H */
15278 DIP("NLOC.H w%d, w%d", wd
, ws
);
15280 unop(Iop_Cls16x8
, getWReg(ws
)));
15283 case 0x02: /* NLOC.W */
15284 DIP("NLOC.W w%d, w%d", wd
, ws
);
15286 unop(Iop_Cls32x4
, getWReg(ws
)));
15289 case 0x03: /* NLOC.D */
15290 DIP("NLOC.D w%d, w%d", wd
, ws
);
15291 t1
= newTemp(Ity_V128
);
15292 assign(t1
, unop(Iop_NotV128
, getWReg(ws
)));
15293 putWReg(wd
, unop(Iop_Clz64x2
, mkexpr(t1
)));
15303 case 0xC3: { /* NLZC.df */
15305 case 0x00: /* NLZC.B */
15306 DIP("NLZC.W w%d, w%d", wd
, ws
);
15308 unop(Iop_Clz8x16
, getWReg(ws
)));
15311 case 0x01: /* NLZC.H */
15312 DIP("NLZC.H w%d, w%d", wd
, ws
);
15314 unop(Iop_Clz16x8
, getWReg(ws
)));
15317 case 0x02: /* NLZC.W */
15318 DIP("NLZC.W w%d, w%d", wd
, ws
);
15320 unop(Iop_Clz32x4
, getWReg(ws
)));
15323 case 0x03: {/* NLZC.D */
15325 unop(Iop_Clz64x2
, getWReg(ws
)));
15343 static Int
msa_2RF(UInt cins
, UChar wd
, UChar ws
) /* 2RF */
15345 IRTemp t1
, t2
, t3
, t4
, t5
;
15349 operation
= (cins
& 0x03FE0000) >> 17;
15350 df
= (cins
& 0x00010000) >> 16;
15351 wt
= (cins
& 0x001F0000) >> 16;
15353 switch (operation
) {
15355 case 0x190: { /* FCLASS.df */
15356 IRTemp t0
= newTemp(Ity_V128
);
15357 t1
= newTemp(Ity_V128
);
15358 t2
= newTemp(Ity_V128
);
15359 t3
= newTemp(Ity_V128
);
15360 t4
= newTemp(Ity_V128
);
15361 t5
= newTemp(Ity_V128
);
15364 case 0x00: { /* FCLASS.W */
15365 DIP("FCLASS.W w%d, w%d", wd
, ws
);
15367 binop(Iop_CmpEQ32x4
,
15370 binop(Iop_64HLtoV128
,
15371 mkU64(0x7F8000007F800000ull
),
15372 mkU64(0x7F8000007F800000ull
))),
15373 binop(Iop_64HLtoV128
,
15374 mkU64(0ull), mkU64(0ull))));
15376 binop(Iop_CmpEQ32x4
,
15379 binop(Iop_64HLtoV128
,
15380 mkU64(0x7F8000007F800000ull
),
15381 mkU64(0x7F8000007F800000ull
))),
15382 binop(Iop_64HLtoV128
,
15383 mkU64(0x7F8000007F800000ull
),
15384 mkU64(0x7F8000007F800000ull
))));
15386 binop(Iop_SarN32x4
,
15387 getWReg(ws
), mkU8(31)));
15389 binop(Iop_CmpEQ32x4
,
15392 binop(Iop_64HLtoV128
,
15393 mkU64(0x0040000000400000ull
),
15394 mkU64(0x0040000000400000ull
))),
15395 binop(Iop_64HLtoV128
,
15396 mkU64(0x0040000000400000ull
),
15397 mkU64(0x0040000000400000ull
))));
15399 binop(Iop_CmpEQ32x4
,
15402 binop(Iop_64HLtoV128
,
15403 mkU64(0x007FFFFF007FFFFFULL
),
15404 mkU64(0x007FFFFF007FFFFFULL
))),
15405 binop(Iop_64HLtoV128
,
15406 mkU64(0ull), mkU64(0ull))));
15414 binop(Iop_64HLtoV128
,
15415 mkU64(0x100000001ull
),
15416 mkU64(0x100000001ull
)))),
15423 binop(Iop_64HLtoV128
,
15424 mkU64(0x800000008ull
),
15425 mkU64(0x800000008ull
))),
15429 binop(Iop_64HLtoV128
,
15430 mkU64(0x400000004ull
),
15431 mkU64(0x400000004ull
))))),
15438 binop(Iop_64HLtoV128
,
15439 mkU64(0x200000002ull
),
15440 mkU64(0x200000002ull
)))))),
15444 binop(Iop_64HLtoV128
,
15445 mkU64(0x200000002ull
),
15446 mkU64(0x200000002ull
))),
15450 binop(Iop_64HLtoV128
,
15451 mkU64(0x600000006ull
),
15452 mkU64(0x600000006ull
))))));
15457 binop(Iop_CmpEQ32x4
,
15459 binop(Iop_64HLtoV128
,
15465 binop(Iop_64HLtoV128
,
15466 mkU64(0x100000001ull
),
15467 mkU64(0x100000001ull
))),
15469 unop(Iop_NotV128
, mkexpr(t3
)),
15470 binop(Iop_64HLtoV128
,
15471 mkU64(0x200000002ull
),
15472 mkU64(0x200000002ull
)))))));
15476 case 0x01: { /* FCLASS.D */
15477 DIP("FCLASS.D w%d, w%d", wd
, ws
);
15479 binop(Iop_CmpEQ64x2
,
15482 binop(Iop_64HLtoV128
,
15483 mkU64(0x7FF0000000000000ull
),
15484 mkU64(0x7FF0000000000000ull
))),
15485 binop(Iop_64HLtoV128
,
15486 mkU64(0ull), mkU64(0ull))));
15488 binop(Iop_CmpEQ64x2
,
15491 binop(Iop_64HLtoV128
,
15492 mkU64(0x7FF0000000000000ull
),
15493 mkU64(0x7FF0000000000000ull
))),
15494 binop(Iop_64HLtoV128
,
15495 mkU64(0x7FF0000000000000ull
),
15496 mkU64(0x7FF0000000000000ull
))));
15498 binop(Iop_SarN64x2
,
15499 getWReg(ws
), mkU8(63)));
15501 binop(Iop_CmpEQ64x2
,
15504 binop(Iop_64HLtoV128
,
15505 mkU64(0x0008000000000000ull
),
15506 mkU64(0x0008000000000000ull
))),
15507 binop(Iop_64HLtoV128
,
15508 mkU64(0x0008000000000000ull
),
15509 mkU64(0x0008000000000000ull
))));
15511 binop(Iop_CmpEQ64x2
,
15514 binop(Iop_64HLtoV128
,
15515 mkU64(0x000FFFFFFFFFFFFFULL
),
15516 mkU64(0x000FFFFFFFFFFFFFULL
))),
15517 binop(Iop_64HLtoV128
,
15518 mkU64(0ull), mkU64(0ull))));
15526 binop(Iop_64HLtoV128
,
15535 binop(Iop_64HLtoV128
,
15541 binop(Iop_64HLtoV128
,
15550 binop(Iop_64HLtoV128
,
15556 binop(Iop_64HLtoV128
,
15562 binop(Iop_64HLtoV128
,
15569 binop(Iop_CmpEQ64x2
,
15571 binop(Iop_64HLtoV128
,
15577 binop(Iop_64HLtoV128
,
15583 binop(Iop_64HLtoV128
,
15596 case 0x191: { /* FTRUNC_S.df */
15598 case 0x00: { /* FTRUNC_S.W */
15599 DIP("FTRUNC_S.W w%d, w%d", wd
, ws
);
15600 calculateMSACSR(ws
, wd
, FTRUNCSW
, 1);
15601 putWReg(wd
, unop(Iop_F32toI32Sx4_RZ
, getWReg(ws
)));
15605 case 0x01: { /* FTRUNC_S.D */
15606 DIP("FTRUNC_S.D w%d, w%d", wd
, ws
);
15607 calculateMSACSR(ws
, wd
, FTRUNCSD
, 1);
15608 t1
= newTemp(Ity_I64
);
15609 t2
= newTemp(Ity_I64
);
15610 t3
= newTemp(Ity_V128
);
15614 binop(Iop_CmpUN64Fx2
,
15617 binop(Iop_Max64Fx2
,
15619 binop(Iop_64HLtoV128
,
15620 mkU64(0xC3E0000000000000),
15621 mkU64(0xC3E0000000000000)))));
15623 binop(Iop_F64toI64S
, mkU32(0x3),
15624 unop(Iop_ReinterpI64asF64
,
15625 unop(Iop_V128to64
, mkexpr(t3
)))));
15627 binop(Iop_F64toI64S
, mkU32(0x3),
15628 unop(Iop_ReinterpI64asF64
,
15629 unop(Iop_V128HIto64
, mkexpr(t3
)))));
15631 binop(Iop_64HLtoV128
,
15632 mkexpr(t2
), mkexpr(t1
)));
15643 case 0x192: { /* FTRUNC_U.df */
15645 case 0x00: { /* FTRUNC_U.W */
15646 DIP("FTRUNC_U.W w%d, w%d", wd
, ws
);
15647 calculateMSACSR(ws
, wd
, FTRUNCUW
, 1);
15648 putWReg(wd
, unop(Iop_F32toI32Ux4_RZ
, getWReg(ws
)));
15652 case 0x01: { /* FTRUNC_U.D */
15653 DIP("FTRUNC_U.D w%d, w%d", wd
, ws
);
15654 calculateMSACSR(ws
, wd
, FTRUNCUD
, 1);
15655 t1
= newTemp(Ity_I64
);
15656 t2
= newTemp(Ity_I64
);
15658 binop(Iop_F64toI64U
,
15660 unop(Iop_ReinterpI64asF64
,
15664 binop(Iop_F64toI64U
,
15666 unop(Iop_ReinterpI64asF64
,
15667 unop(Iop_V128HIto64
,
15670 binop(Iop_64HLtoV128
,
15671 mkexpr(t2
), mkexpr(t1
)));
15682 case 0x193: { /* FSQRT.df */
15684 case 0x00: { /* FSQRT.W */
15685 DIP("FSQRT.W w%d, w%d", wd
, ws
);
15686 IRExpr
*rm
= get_IR_roundingmode_MSA();
15687 calculateMSACSR(ws
, wd
, FSQRTW
, 1);
15688 putWReg(wd
, binop(Iop_Sqrt32Fx4
, rm
, getWReg(ws
)));
15692 case 0x01: { /* FSQRT.D */
15693 DIP("FSQRT.D w%d, w%d", wd
, ws
);
15694 IRExpr
*rm
= get_IR_roundingmode_MSA();
15695 calculateMSACSR(ws
, wd
, FSQRTD
, 1);
15696 putWReg(wd
, binop(Iop_Sqrt64Fx2
, rm
, getWReg(ws
)));
15707 case 0x194: { /* FRSQRT.df */
15709 case 0x00: { /* FRSQRT.W */
15710 DIP("FRSQRT.W w%d, w%d", wd
, ws
);
15711 calculateMSACSR(ws
, wd
, FRSQRTW
, 1);
15712 putWReg(wd
, unop(Iop_RSqrtEst32Fx4
, getWReg(ws
)));
15716 case 0x01: { /* FRSQRT.D */
15717 DIP("FRSQRT.D w%d, w%d", wd
, ws
);
15718 calculateMSACSR(ws
, wd
, FRSQRTD
, 1);
15719 putWReg(wd
, unop(Iop_RSqrtEst64Fx2
, getWReg(ws
)));
15730 case 0x195: { /* FRCP.df */
15731 switch (df
) { /* FRCP.W */
15733 DIP("FRCP.W w%d, w%d", wd
, ws
);
15734 calculateMSACSR(ws
, wd
, FRCPW
, 1);
15735 putWReg(wd
, unop(Iop_RecipEst32Fx4
, getWReg(ws
)));
15739 case 0x01: { /* FRCP.D */
15740 DIP("FRCP.D w%d, w%d", wd
, ws
);
15741 calculateMSACSR(ws
, wd
, FRCPD
, 1);
15742 putWReg(wd
, unop(Iop_RecipEst64Fx2
, getWReg(ws
)));
15753 case 0x196: { /* FRINT.df */
15754 t1
= newTemp(Ity_V128
);
15755 t2
= newTemp(Ity_V128
);
15756 t3
= newTemp(Ity_V128
);
15757 t4
= newTemp(Ity_V128
);
15758 IRExpr
*rm
= get_IR_roundingmode_MSA();
15759 assign(t1
, getWReg(ws
));
15762 case 0x00: { /* FRINT.W */
15763 DIP("FRINT.W w%d, w%d", wd
, ws
);
15764 calculateMSACSR(ws
, wt
, FRINTW
, 1);
15767 binop(Iop_CmpLT32Fx4
,
15769 binop(Iop_64HLtoV128
,
15770 mkU64(0xCF000000CF000000ull
),
15771 mkU64(0xCF000000CF000000ull
))),
15772 binop(Iop_CmpLT32Fx4
,
15773 binop(Iop_64HLtoV128
,
15774 mkU64(0x4F0000004F000000ull
),
15775 mkU64(0x4F0000004F000000ull
)),
15778 binop(Iop_CmpEQ32x4
,
15781 binop(Iop_64HLtoV128
,
15782 mkU64(0x0040000000400000ull
),
15783 mkU64(0x0040000000400000ull
))),
15784 binop(Iop_64HLtoV128
,
15785 mkU64(0x0040000000400000ull
),
15786 mkU64(0x0040000000400000ull
))));
15788 binop(Iop_CmpUN32Fx4
,
15789 mkexpr(t1
), mkexpr(t1
)));
15793 for (i
= 0; i
< 4; i
++) {
15794 tmp
[i
] = newTemp(Ity_I32
);
15796 unop(Iop_ReinterpF32asI32
,
15797 binop(Iop_RoundF32toInt
, rm
,
15798 unop(Iop_ReinterpI32asF32
,
15799 binop(Iop_GetElem32x4
,
15800 mkexpr(t1
), mkU8(i
))))));
15818 binop(Iop_64HLtoV128
,
15819 mkU64(0x7FBFFFFF7FBFFFFF),
15820 mkU64(0x7FBFFFFF7FBFFFFF)))),
15827 binop(Iop_64HLtoV128
,
15828 binop(Iop_32HLto64
,
15831 binop(Iop_32HLto64
,
15836 binop(Iop_64HLtoV128
,
15837 mkU64(0x8000000080000000ull
),
15838 mkU64(0x8000000080000000ull
)))
15843 case 0x01: { /* FRINT.D */
15844 DIP("FRINT.D w%d, w%d", wd
, ws
);
15845 calculateMSACSR(ws
, wt
, FRINTD
, 1);
15848 binop(Iop_CmpLT64Fx2
,
15850 binop(Iop_64HLtoV128
,
15851 mkU64(0xC3E0000000000000ull
),
15852 mkU64(0xC3E0000000000000ull
))),
15853 binop(Iop_CmpLT64Fx2
,
15854 binop(Iop_64HLtoV128
,
15855 mkU64(0x43E0000000000000ull
),
15856 mkU64(0x43E0000000000000ull
)),
15859 binop(Iop_CmpEQ64x2
,
15862 binop(Iop_64HLtoV128
,
15863 mkU64(0x0008000000000000ull
),
15864 mkU64(0x0008000000000000ull
))),
15865 binop(Iop_64HLtoV128
,
15866 mkU64(0x0008000000000000ull
),
15867 mkU64(0x0008000000000000ull
))));
15869 binop(Iop_CmpUN64Fx2
,
15870 mkexpr(t1
), mkexpr(t1
)));
15874 for (i
= 0; i
< 2; i
++) {
15875 tmp
[i
] = newTemp(Ity_I64
);
15877 unop(Iop_ReinterpF64asI64
,
15878 binop(Iop_RoundF64toInt
, rm
,
15879 unop(Iop_ReinterpI64asF64
,
15880 binop(Iop_GetElem64x2
,
15881 mkexpr(t1
), mkU8(i
))))));
15899 binop(Iop_64HLtoV128
,
15900 mkU64(0x7FF7FFFFFFFFFFFF),
15901 mkU64(0x7FF7FFFFFFFFFFFF)))),
15908 binop(Iop_64HLtoV128
,
15913 binop(Iop_64HLtoV128
,
15914 mkU64(0x8000000000000000ull
),
15915 mkU64(0x8000000000000000ull
))
15927 case 0x197: { /* FLOG2.df */
15930 case 0x00: { /* FLOG2.W */
15931 DIP("FLOG2.W w%d, w%d", wd
, ws
);
15932 calculateMSACSR(ws
, wt
, FLOG2W
, 1);
15933 putWReg(wd
, unop(Iop_Log2_32Fx4
, getWReg(ws
)));
15937 case 0x01: { /* FLOG2.D */
15938 DIP("FLOG2.D w%d, w%d", wd
, ws
);
15939 calculateMSACSR(ws
, wt
, FLOG2D
, 1);
15940 putWReg(wd
, unop(Iop_Log2_64Fx2
, getWReg(ws
)));
15951 case 0x198: { /* FEXUPL.df */
15953 case 0x00: { /* FEXUPL.W */
15954 DIP("FEXUPL.W w%d, w%d", wd
, ws
);
15955 calculateMSACSR(ws
, wt
, FEXUPLW
, 1);
15957 unop(Iop_F16toF32x4
,
15958 unop(Iop_V128HIto64
,
15963 case 0x01: { /* FEXUPL.D */
15964 DIP("FEXUPL.D w%d, w%d", wd
, ws
);
15965 calculateMSACSR(ws
, wt
, FEXUPLD
, 1);
15966 t1
= newTemp(Ity_I64
);
15967 t2
= newTemp(Ity_I64
);
15969 unop(Iop_ReinterpF64asI64
,
15971 unop(Iop_ReinterpI32asF32
,
15973 unop(Iop_V128HIto64
,
15976 unop(Iop_ReinterpF64asI64
,
15978 unop(Iop_ReinterpI32asF32
,
15980 unop(Iop_V128HIto64
,
15983 binop(Iop_64HLtoV128
,
15984 mkexpr(t2
), mkexpr(t1
)));
15995 case 0x199: { /* FEXUPR.df */
15997 case 0x00: { /* FEXUPR.W */
15998 DIP("FEXUPR.W w%d, w%d", wd
, ws
);
15999 calculateMSACSR(ws
, wt
, FEXUPRW
, 1);
16001 unop(Iop_F16toF32x4
,
16007 case 0x01: { /* FEXUPR.D */
16008 DIP("FEXUPR.D w%d, w%d", wd
, ws
);
16009 calculateMSACSR(ws
, wt
, FEXUPRD
, 1);
16010 t1
= newTemp(Ity_I64
);
16011 t2
= newTemp(Ity_I64
);
16013 unop(Iop_ReinterpF64asI64
,
16015 unop(Iop_ReinterpI32asF32
,
16020 unop(Iop_ReinterpF64asI64
,
16022 unop(Iop_ReinterpI32asF32
,
16027 binop(Iop_64HLtoV128
,
16028 mkexpr(t2
), mkexpr(t1
)));
16039 case 0x19A: { /* FFQL.df */
16041 case 0x00: { /* FFQL.W */
16042 DIP("FFQL.W w%d, w%d", wd
, ws
);
16043 calculateMSACSR(ws
, wt
, FFQLW
, 1);
16044 t1
= newTemp(Ity_V128
);
16045 t2
= newTemp(Ity_I64
);
16046 t3
= newTemp(Ity_I64
);
16047 IRExpr
*rm
= get_IR_roundingmode_MSA();
16049 binop(Iop_SarN32x4
,
16050 binop(Iop_InterleaveHI16x8
,
16055 binop(Iop_32HLto64
,
16056 unop(Iop_ReinterpF32asI32
,
16057 binop(Iop_I32StoF32
, rm
,
16058 binop(Iop_GetElem32x4
,
16061 unop(Iop_ReinterpF32asI32
,
16062 binop(Iop_I32StoF32
, rm
,
16063 binop(Iop_GetElem32x4
,
16067 binop(Iop_32HLto64
,
16068 unop(Iop_ReinterpF32asI32
,
16069 binop(Iop_I32StoF32
, rm
,
16070 binop(Iop_GetElem32x4
,
16073 unop(Iop_ReinterpF32asI32
,
16074 binop(Iop_I32StoF32
, rm
,
16075 binop(Iop_GetElem32x4
,
16079 triop(Iop_Div32Fx4
, rm
,
16080 binop(Iop_64HLtoV128
,
16081 mkexpr(t3
), mkexpr(t2
)),
16082 binop(Iop_64HLtoV128
,
16083 mkU64(0x4700000047000000),
16084 mkU64(0x4700000047000000))));
16088 case 0x01: { /* FFQL.D */
16089 DIP("FFQL.D w%d, w%d", wd
, ws
);
16090 calculateMSACSR(ws
, wt
, FFQLD
, 1);
16091 t1
= newTemp(Ity_V128
);
16092 t2
= newTemp(Ity_I64
);
16093 t3
= newTemp(Ity_I64
);
16094 IRExpr
*rm
= get_IR_roundingmode_MSA();
16096 binop(Iop_SarN64x2
,
16097 binop(Iop_InterleaveHI32x4
,
16102 unop(Iop_ReinterpF64asI64
,
16103 binop(Iop_I64StoF64
, rm
,
16107 unop(Iop_ReinterpF64asI64
,
16108 binop(Iop_I64StoF64
, rm
,
16109 unop(Iop_V128HIto64
,
16112 triop(Iop_Div64Fx2
, rm
,
16113 binop(Iop_64HLtoV128
,
16114 mkexpr(t3
), mkexpr(t2
)),
16115 binop(Iop_64HLtoV128
,
16116 mkU64(0x41E0000000000000),
16117 mkU64(0x41E0000000000000))));
16128 case 0x19B: { /* FFQR.df */
16130 case 0x00: { /* FFQR.W */
16131 DIP("FFQR.W w%d, w%d", wd
, ws
);
16132 calculateMSACSR(ws
, wt
, FFQRW
, 1);
16133 t1
= newTemp(Ity_V128
);
16134 t2
= newTemp(Ity_I64
);
16135 t3
= newTemp(Ity_I64
);
16136 IRExpr
*rm
= get_IR_roundingmode_MSA();
16138 binop(Iop_SarN32x4
,
16139 binop(Iop_InterleaveLO16x8
,
16144 binop(Iop_32HLto64
,
16145 unop(Iop_ReinterpF32asI32
,
16146 binop(Iop_I32StoF32
, rm
,
16147 binop(Iop_GetElem32x4
,
16150 unop(Iop_ReinterpF32asI32
,
16151 binop(Iop_I32StoF32
, rm
,
16152 binop(Iop_GetElem32x4
,
16156 binop(Iop_32HLto64
,
16157 unop(Iop_ReinterpF32asI32
,
16158 binop(Iop_I32StoF32
, rm
,
16159 binop(Iop_GetElem32x4
,
16162 unop(Iop_ReinterpF32asI32
,
16163 binop(Iop_I32StoF32
, rm
,
16164 binop(Iop_GetElem32x4
,
16168 triop(Iop_Div32Fx4
, rm
,
16169 binop(Iop_64HLtoV128
,
16170 mkexpr(t3
), mkexpr(t2
)),
16171 binop(Iop_64HLtoV128
,
16172 mkU64(0x4700000047000000),
16173 mkU64(0x4700000047000000))));
16177 case 0x01: { /* FFQR.D */
16178 DIP("FFQR.D w%d, w%d", wd
, ws
);
16179 calculateMSACSR(ws
, wt
, FFQRD
, 1);
16180 t1
= newTemp(Ity_V128
);
16181 t2
= newTemp(Ity_I64
);
16182 t3
= newTemp(Ity_I64
);
16183 IRExpr
*rm
= get_IR_roundingmode_MSA();
16185 binop(Iop_SarN64x2
,
16186 binop(Iop_InterleaveLO32x4
,
16191 unop(Iop_ReinterpF64asI64
,
16192 binop(Iop_I64StoF64
, rm
,
16196 unop(Iop_ReinterpF64asI64
,
16197 binop(Iop_I64StoF64
, rm
,
16198 unop(Iop_V128HIto64
,
16201 triop(Iop_Div64Fx2
, rm
,
16202 binop(Iop_64HLtoV128
,
16203 mkexpr(t3
), mkexpr(t2
)),
16204 binop(Iop_64HLtoV128
,
16205 mkU64(0x41E0000000000000),
16206 mkU64(0x41E0000000000000))));
16217 case 0x19C: { /* FTINT_S.df */
16218 switch (df
) { /* FTINT_S.W */
16220 DIP("FTINT_S.W w%d, w%d", wd
, ws
);
16221 calculateMSACSR(ws
, wd
, FTINT_SW
, 1);
16222 t1
= newTemp(Ity_I64
);
16223 t2
= newTemp(Ity_I64
);
16224 t3
= newTemp(Ity_V128
);
16225 t4
= newTemp(Ity_I32
);
16229 binop(Iop_CmpUN32Fx4
,
16232 binop(Iop_Max32Fx4
,
16234 binop(Iop_64HLtoV128
,
16235 mkU64(0xCF000000CF000000),
16236 mkU64(0xCF000000CF000000)))));
16237 IRExpr
*rm
= get_IR_roundingmode_MSA();
16239 binop(Iop_32HLto64
,
16240 binop(Iop_F32toI32S
, rm
,
16241 unop(Iop_ReinterpI32asF32
,
16242 binop(Iop_GetElem32x4
,
16243 mkexpr(t3
), mkU8(1)))),
16244 binop(Iop_F32toI32S
, rm
,
16245 unop(Iop_ReinterpI32asF32
,
16246 binop(Iop_GetElem32x4
,
16247 mkexpr(t3
), mkU8(0))))));
16249 binop(Iop_32HLto64
,
16250 binop(Iop_F32toI32S
, rm
,
16251 unop(Iop_ReinterpI32asF32
,
16252 binop(Iop_GetElem32x4
,
16253 mkexpr(t3
), mkU8(3)))),
16254 binop(Iop_F32toI32S
, rm
,
16255 unop(Iop_ReinterpI32asF32
,
16256 binop(Iop_GetElem32x4
,
16257 mkexpr(t3
), mkU8(2))))));
16259 binop(Iop_64HLtoV128
,
16260 mkexpr(t2
), mkexpr(t1
)));
16264 case 0x01: { /* FTINT_S.D */
16265 DIP("FTINT_S.D w%d, w%d", wd
, ws
);
16266 calculateMSACSR(ws
, wd
, FTINT_SD
, 1);
16267 t1
= newTemp(Ity_I64
);
16268 t2
= newTemp(Ity_I64
);
16269 t3
= newTemp(Ity_V128
);
16273 binop(Iop_CmpUN64Fx2
,
16276 binop(Iop_Max64Fx2
,
16278 binop(Iop_64HLtoV128
,
16279 mkU64(0xC3E0000000000000),
16280 mkU64(0xC3E0000000000000)))));
16281 IRExpr
*rm
= get_IR_roundingmode_MSA();
16283 binop(Iop_F64toI64S
, rm
,
16284 unop(Iop_ReinterpI64asF64
,
16285 unop(Iop_V128to64
, mkexpr(t3
)))));
16287 binop(Iop_F64toI64S
, rm
,
16288 unop(Iop_ReinterpI64asF64
,
16289 unop(Iop_V128HIto64
, mkexpr(t3
)))));
16291 binop(Iop_64HLtoV128
,
16292 mkexpr(t2
), mkexpr(t1
)));
16303 case 0x19D: {/* FTINT_U.df */
16304 switch (df
) { /* FTINT_U.W */
16306 DIP("FTINT_U.W w%d, w%d", wd
, ws
);
16307 calculateMSACSR(ws
, wd
, FTINT_UW
, 1);
16308 t1
= newTemp(Ity_I64
);
16309 t2
= newTemp(Ity_I64
);
16310 t3
= newTemp(Ity_V128
);
16311 t4
= newTemp(Ity_V128
);
16312 IRExpr
*rm
= get_IR_roundingmode_MSA();
16314 binop(Iop_32HLto64
,
16315 binop(Iop_F32toI32U
, rm
,
16316 unop(Iop_ReinterpI32asF32
,
16317 binop(Iop_GetElem32x4
,
16318 getWReg(ws
), mkU8(1)))),
16319 binop(Iop_F32toI32U
, rm
,
16320 unop(Iop_ReinterpI32asF32
,
16321 binop(Iop_GetElem32x4
,
16322 getWReg(ws
), mkU8(0))))));
16324 binop(Iop_32HLto64
,
16325 binop(Iop_F32toI32U
, rm
,
16326 unop(Iop_ReinterpI32asF32
,
16327 binop(Iop_GetElem32x4
,
16328 getWReg(ws
), mkU8(3)))),
16329 binop(Iop_F32toI32U
, rm
,
16330 unop(Iop_ReinterpI32asF32
,
16331 binop(Iop_GetElem32x4
,
16332 getWReg(ws
), mkU8(2))))));
16335 binop(Iop_SarN32x4
,
16339 binop(Iop_CmpLT32Fx4
,
16341 binop(Iop_64HLtoV128
,
16342 mkU64(0x4EFFFFFF4EFFFFFF),
16343 mkU64(0x4EFFFFFF4EFFFFFF))));
16349 binop(Iop_64HLtoV128
,
16354 unop(Iop_NotV128
, mkexpr(t4
)),
16355 unop(Iop_F32toI32Ux4_RZ
,
16360 case 0x01: { /* FTINT_U.D */
16361 DIP("FTINT_U.D w%d, w%d", wd
, ws
);
16362 calculateMSACSR(ws
, wd
, FTINT_UD
, 1);
16363 t1
= newTemp(Ity_I64
);
16364 t2
= newTemp(Ity_I64
);
16365 IRExpr
*rm
= get_IR_roundingmode_MSA();
16367 binop(Iop_F64toI64U
, rm
,
16368 unop(Iop_ReinterpI64asF64
,
16372 binop(Iop_F64toI64U
, rm
,
16373 unop(Iop_ReinterpI64asF64
,
16374 unop(Iop_V128HIto64
,
16377 binop(Iop_64HLtoV128
,
16378 mkexpr(t2
), mkexpr(t1
)));
16389 case 0x19E: { /* FFINT_S.df */
16390 t1
= newTemp(Ity_V128
);
16391 assign(t1
, getWReg(ws
));
16392 IRExpr
*rm
= get_IR_roundingmode_MSA();
16395 case 0x00: { /* FFINT_S.W */
16396 DIP("FFINT_S.W w%d, w%d", wd
, ws
);
16397 calculateMSACSR(ws
, wt
, FFINTSW
, 1);
16401 for (i
= 0; i
< 4; i
++) {
16402 tmp
[i
] = newTemp(Ity_F32
);
16404 binop(Iop_I32StoF32
, rm
,
16405 binop(Iop_GetElem32x4
,
16406 mkexpr(t1
), mkU8(i
))));
16410 binop(Iop_64HLtoV128
,
16411 binop(Iop_32HLto64
,
16412 unop(Iop_ReinterpF32asI32
,
16414 unop(Iop_ReinterpF32asI32
,
16416 binop(Iop_32HLto64
,
16417 unop(Iop_ReinterpF32asI32
,
16419 unop(Iop_ReinterpF32asI32
,
16420 mkexpr(tmp
[0])))));
16424 case 0x01: { /* FFINT_S.D */
16425 DIP("FFINT_S.D w%d, w%d", wd
, ws
);
16426 calculateMSACSR(ws
, wt
, FFINTSD
, 1);
16430 for (i
= 0; i
< 2; i
++) {
16431 tmp
[i
] = newTemp(Ity_F64
);
16433 binop(Iop_I64StoF64
, rm
,
16434 binop(Iop_GetElem64x2
,
16435 mkexpr(t1
), mkU8(i
))));
16439 binop(Iop_64HLtoV128
,
16440 unop(Iop_ReinterpF64asI64
,
16442 unop(Iop_ReinterpF64asI64
,
16454 case 0x19F: { /* FFINT_U.df */
16455 IRExpr
*rm
= get_IR_roundingmode_MSA();
16458 case 0x00: { /* FFINT_U.W */
16459 DIP("FFINT_U.W w%d, w%d", wd
, ws
);
16460 calculateMSACSR(ws
, wt
, FFINT_UW
, 1);
16461 putWReg(wd
, unop(Iop_I32UtoF32x4_DEP
, getWReg(ws
)));
16465 case 0x01: { /* FFINT_U.D */
16466 DIP("FFINT_U.D w%d, w%d",
16468 calculateMSACSR(ws
, wt
,
16470 t1
= newTemp(Ity_I64
);
16471 t2
= newTemp(Ity_I64
);
16473 unop(Iop_ReinterpF64asI64
,
16474 binop(Iop_I64UtoF64
, rm
,
16478 unop(Iop_ReinterpF64asI64
,
16479 binop(Iop_I64UtoF64
, rm
,
16480 unop(Iop_V128HIto64
,
16483 binop(Iop_64HLtoV128
,
16484 mkexpr(t2
), mkexpr(t1
)));
16502 static Int
msa_MI10_load(UInt cins
, UChar wd
, UChar ws
) /* MI10 (0x20) */
16508 i10
= (cins
& 0x03FF0000) >> 16;
16509 df
= cins
& 0x00000003;
16512 case 0x00: { /* LD.B */
16513 DIP("LD.B w%d, %d(r%d)", wd
, ws
, i10
);
16514 LOAD_STORE_PATTERN_MSA(i10
);
16515 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16519 case 0x01: { /* LD.H */
16520 DIP("LD.H w%d, %d(r%d)", wd
, ws
, i10
);
16521 LOAD_STORE_PATTERN_MSA(i10
<< 1);
16522 #if defined (_MIPSEL)
16523 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16524 #elif defined (_MIPSEB)
16526 unop(Iop_Reverse8sIn16_x8
,
16527 load(Ity_V128
, mkexpr(t1
))));
16532 case 0x02: { /* LD.W */
16533 DIP("LD.W w%d, %d(r%d)", wd
, ws
, i10
);
16534 LOAD_STORE_PATTERN_MSA(i10
<< 2);
16535 #if defined (_MIPSEL)
16536 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16537 #elif defined (_MIPSEB)
16539 unop(Iop_Reverse8sIn32_x4
,
16540 load(Ity_V128
, mkexpr(t1
))));
16545 case 0x03: { /* LD.D */
16546 DIP("LD.D w%d, %d(r%d)", wd
, ws
, i10
);
16547 LOAD_STORE_PATTERN_MSA(i10
<< 3);
16548 #if defined (_MIPSEL)
16549 putWReg(wd
, load(Ity_V128
, mkexpr(t1
)));
16550 #elif defined (_MIPSEB)
16552 unop(Iop_Reverse8sIn64_x2
,
16553 load(Ity_V128
, mkexpr(t1
))));
16565 static Int
msa_MI10_store(UInt cins
, UChar wd
, UChar ws
) /* MI10 (0x24) */
16571 df
= cins
& 0x00000003;
16572 i10
= (cins
& 0x03FF0000) >> 16;
16575 case 0x00: { /* ST.B */
16576 DIP("ST.B w%d, %d(r%d)", wd
, ws
, i10
);
16577 LOAD_STORE_PATTERN_MSA(i10
);
16578 store(mkexpr(t1
), getWReg(wd
));
16582 case 0x01: { /* ST.H */
16583 DIP("ST.H w%d, %d(r%d)", wd
, ws
, i10
);
16584 LOAD_STORE_PATTERN_MSA(i10
<< 1);
16585 #if defined (_MIPSEL)
16586 store(mkexpr(t1
), getWReg(wd
));
16587 #elif defined (_MIPSEB)
16589 unop(Iop_Reverse8sIn16_x8
, getWReg(wd
)));
16594 case 0x02: { /* ST.W */
16595 DIP("ST.W w%d, %d(r%d)", wd
, ws
, i10
);
16596 LOAD_STORE_PATTERN_MSA(i10
<< 2);
16597 #if defined (_MIPSEL)
16598 store(mkexpr(t1
), getWReg(wd
));
16599 #elif defined (_MIPSEB)
16601 unop(Iop_Reverse8sIn32_x4
, getWReg(wd
)));
16606 case 0x03: { /* ST.D */
16607 DIP("ST.D w%d, %d(r%d)", wd
, ws
, i10
);
16608 LOAD_STORE_PATTERN_MSA(i10
<< 3);
16609 #if defined (_MIPSEL)
16610 store(mkexpr(t1
), getWReg(wd
));
16611 #elif defined (_MIPSEB)
16613 unop(Iop_Reverse8sIn64_x2
, getWReg(wd
)));
16625 /*------------------------------------------------------------*/
16626 /*--- Disassemble a single MIPS MSA (SIMD) instruction ---*/
16627 /*--- Return values: ---*/
16628 /*--- 0: Success ---*/
16629 /*--- -1: Decode failure (unknown instruction) ---*/
16630 /*--- -2: Illegal instruction ---*/
16631 /*------------------------------------------------------------*/
16632 static Int
disMSAInstr_MIPS_WRK ( UInt cins
)
16634 UChar minor_opcode
, wd
, ws
;
16637 vassert((cins
& 0xFC000000) == 0x78000000);
16639 minor_opcode
= (cins
& 0x20) > 0 ? (cins
& 0x3C) : (cins
& 0x3F);
16640 wd
= (cins
& 0x000007C0) >> 6;
16641 ws
= (cins
& 0x0000F800) >> 11;
16643 switch (minor_opcode
) {
16645 return msa_I8_logical(cins
, wd
, ws
);
16648 return msa_I8_branch(cins
, wd
, ws
);
16651 return msa_I8_shift(cins
, wd
, ws
);
16654 return msa_I5_06(cins
, wd
, ws
);
16657 return msa_I5_07(cins
, wd
, ws
);
16660 return msa_BIT_09(cins
, wd
, ws
);
16663 return msa_BIT_0A(cins
, wd
, ws
);
16666 return msa_3R_0D(cins
, wd
, ws
);
16669 return msa_3R_0E(cins
, wd
, ws
);
16672 return msa_3R_0F(cins
, wd
, ws
);
16675 return msa_3R_10(cins
, wd
, ws
);
16678 return msa_3R_11(cins
, wd
, ws
);
16681 return msa_3R_12(cins
, wd
, ws
);
16684 return msa_3R_13(cins
, wd
, ws
);
16687 return msa_3R_14(cins
, wd
, ws
);
16690 return msa_3R_15(cins
, wd
, ws
);
16693 return msa_ELM(cins
, wd
, ws
);
16696 return msa_3R_1A(cins
, wd
, ws
);
16699 return msa_3R_1B(cins
, wd
, ws
);
16702 return msa_3R_1C(cins
, wd
, ws
);
16705 if ((cins
& 0x03000000) == 0)
16706 return msa_VEC(cins
, wd
, ws
);
16707 else if ((cins
& 0x00200000) == 0)
16708 return msa_2R(cins
, wd
, ws
);
16710 return msa_2RF(cins
, wd
, ws
);
16713 return msa_MI10_load(cins
, wd
, ws
);
16716 return msa_MI10_store(cins
, wd
, ws
);
16722 /*------------------------------------------------------------*/
16723 /*--- DSP to IR function ---*/
16724 /*------------------------------------------------------------*/
16726 extern UInt
disDSPInstr_MIPS_WRK ( UInt
);
16728 /*------------------------------------------------------------*/
16729 /*--- Disassemble a single instruction ---*/
16730 /*------------------------------------------------------------*/
16732 /* Disassemble a single instruction into IR. The instruction is
16733 located in host memory at guest_instr, and has guest IP of
16734 guest_PC_curr_instr, which will have been set before the call
16738 static UInt
disInstr_MIPS_WRK_Special(UInt cins
, const VexArchInfo
* archinfo
,
16739 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
16740 IRStmt
** bstmt
, IRExpr
** lastn
)
16742 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
;
16743 UInt rs
, rt
, rd
, sa
, tf
, function
, trap_code
, imm
, instr_index
, rot
, sel
;
16744 /* Additional variables for instruction fields in DSP ASE insructions */
16747 imm
= get_imm(cins
);
16753 sel
= get_sel(cins
);
16754 instr_index
= get_instr_index(cins
);
16755 trap_code
= get_code(cins
);
16756 function
= get_function(cins
);
16757 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
16759 ac
= get_acNo(cins
);
16761 switch (function
) {
16762 case 0x00: { /* SLL */
16763 DIP("sll r%u, r%u, %u", rd
, rt
, sa
);
16764 IRTemp tmpRt32
= newTemp(Ity_I32
);
16765 IRTemp tmpSh32
= newTemp(Ity_I32
);
16766 IRTemp tmpRd
= newTemp(Ity_I64
);
16769 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16770 assign(tmpSh32
, binop(Iop_Shl32
, mkexpr(tmpRt32
), mkU8(sa
)));
16771 assign(tmpRd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16772 putIReg(rd
, mkexpr(tmpRd
));
16774 SXX_PATTERN(Iop_Shl32
);
16779 case 0x01: { /* MOVCI */
16780 UInt mov_cc
= get_mov_cc(cins
);
16782 if (tf
== 0) { /* MOVF */
16783 DIP("movf r%u, r%u, %u", rd
, rs
, mov_cc
);
16784 t1
= newTemp(Ity_I1
);
16785 t2
= newTemp(Ity_I32
);
16786 t3
= newTemp(Ity_I1
);
16788 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
16789 assign(t2
, IRExpr_ITE(mkexpr(t1
),
16791 binop(Iop_Shr32
, getFCSR(),
16795 binop(Iop_Shr32
, getFCSR(),
16796 mkU8(24 + mov_cc
)),
16799 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
16800 putIReg(rd
, IRExpr_ITE(mkexpr(t3
), getIReg(rs
), getIReg(rd
)));
16801 } else if (tf
== 1) { /* MOVT */
16802 DIP("movt r%u, r%u, %u", rd
, rs
, mov_cc
);
16803 t1
= newTemp(Ity_I1
);
16804 t2
= newTemp(Ity_I32
);
16805 t3
= newTemp(Ity_I1
);
16807 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
16808 assign(t2
, IRExpr_ITE(mkexpr(t1
),
16810 binop(Iop_Shr32
, getFCSR(),
16814 binop(Iop_Shr32
, getFCSR(),
16815 mkU8(24 + mov_cc
)),
16818 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
16819 putIReg(rd
, IRExpr_ITE(mkexpr(t3
), getIReg(rs
), getIReg(rd
)));
16825 case 0x02: { /* SRL */
16826 rot
= get_rot(cins
);
16829 DIP("rotr r%u, r%u, %u", rd
, rt
, sa
);
16830 putIReg(rd
, mkWidenFrom32(ty
, genROR32(mkNarrowTo32(ty
,
16831 getIReg(rt
)), sa
), True
));
16833 DIP("srl r%u, r%u, %u", rd
, rt
, sa
);
16836 IRTemp tmpSh32
= newTemp(Ity_I32
);
16837 IRTemp tmpRt32
= newTemp(Ity_I32
);
16839 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16840 assign(tmpSh32
, binop(Iop_Shr32
, mkexpr(tmpRt32
), mkU8(sa
)));
16841 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16843 SXX_PATTERN(Iop_Shr32
);
16850 case 0x03: /* SRA */
16851 DIP("sra r%u, r%u, %u", rd
, rt
, sa
);
16854 IRTemp tmpRt32
= newTemp(Ity_I32
);
16855 IRTemp tmpSh32
= newTemp(Ity_I32
);
16857 t1
= newTemp(Ity_I64
);
16858 t2
= newTemp(Ity_I64
);
16859 t3
= newTemp(Ity_I64
);
16861 assign(t1
, binop(Iop_And64
, getIReg(rt
), /* hi */
16862 mkU64(0xFFFFFFFF00000000ULL
)));
16864 assign(t2
, binop(Iop_Sar64
, mkexpr(t1
), mkU8(sa
)));
16866 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16867 assign(tmpSh32
, binop(Iop_Sar32
, mkexpr(tmpRt32
), mkU8(sa
)));
16869 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16871 SXX_PATTERN(Iop_Sar32
);
16876 case 0x04: { /* SLLV */
16877 DIP("sllv r%u, r%u, r%u", rd
, rt
, rs
);
16880 IRTemp tmpRs8
= newTemp(Ity_I8
);
16881 IRTemp tmpRt32
= newTemp(Ity_I32
);
16882 IRTemp tmpSh32
= newTemp(Ity_I32
);
16883 IRTemp tmp
= newTemp(ty
);
16884 assign(tmp
, binop(mkSzOp(ty
, Iop_And8
), getIReg(rs
),
16886 assign(tmpRs8
, mkNarrowTo8(ty
, mkexpr(tmp
)));
16887 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16888 assign(tmpSh32
, binop(Iop_Shl32
, mkexpr(tmpRt32
), mkexpr(tmpRs8
)));
16889 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16891 SXXV_PATTERN(Iop_Shl32
);
16897 case 0x05: { /* LSA */
16898 UInt imm2
= (imm
& 0xC0) >> 6;
16900 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) || has_msa
) {
16901 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16904 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16905 putIReg(rd
, unop(Iop_32Sto64
,
16908 unop(Iop_64to32
, getIReg(rs
)),
16910 unop(Iop_64to32
, getIReg(rt
)))));
16913 DIP("lsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
16914 putIReg(rd
, binop(Iop_Add32
,
16916 getIReg(rs
), mkU8(imm2
+ 1)), getIReg(rt
)));
16920 ILLEGAL_INSTRUCTON
;
16925 case 0x06: { /* SRLV */
16926 rot
= get_rotv(cins
);
16929 DIP("rotrv r%u, r%u, r%u", rd
, rt
, rs
);
16930 putIReg(rd
, mkWidenFrom32(ty
, genRORV32(mkNarrowTo32(ty
,
16931 getIReg(rt
)), mkNarrowTo32(ty
, getIReg(rs
))), True
));
16933 } else { /* SRLV */
16934 DIP("srlv r%u, r%u, r%u", rd
, rt
, rs
);
16937 SXXV_PATTERN64(Iop_Shr32
);
16939 SXXV_PATTERN(Iop_Shr32
);
16946 case 0x07: /* SRAV */
16947 DIP("srav r%u, r%u, r%u", rd
, rt
, rs
);
16950 IRTemp tmpRt32
= newTemp(Ity_I32
);
16951 IRTemp tmpSh32
= newTemp(Ity_I32
);
16953 t1
= newTemp(Ity_I64
);
16954 t2
= newTemp(Ity_I64
);
16955 t3
= newTemp(Ity_I64
);
16956 t4
= newTemp(Ity_I8
);
16958 assign(t4
, unop(Iop_32to8
, binop(Iop_And32
,
16959 mkNarrowTo32(ty
, getIReg(rs
)), mkU32(0x0000001F))));
16961 assign(t1
, binop(Iop_And64
, getIReg(rt
), /* hi */
16962 mkU64(0xFFFFFFFF00000000ULL
)));
16964 assign(t2
, binop(Iop_Sar64
, mkexpr(t1
), mkexpr(t4
)));
16966 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
16967 assign(tmpSh32
, binop(Iop_Sar32
, mkexpr(tmpRt32
), mkexpr(t4
)));
16969 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpSh32
), True
));
16971 SXXV_PATTERN(Iop_Sar32
);
16976 case 0x08: /* JR */
16979 assign(t0
, getIReg(rs
));
16980 *lastn
= mkexpr(t0
);
16983 case 0x09: /* JALR */
16984 DIP("jalr r%u r%u", rd
, rs
);
16987 putIReg(rd
, mkU64(guest_PC_curr_instr
+ 8));
16988 t0
= newTemp(Ity_I64
);
16989 assign(t0
, getIReg(rs
));
16990 *lastn
= mkexpr(t0
);
16992 putIReg(rd
, mkU32(guest_PC_curr_instr
+ 8));
16993 t0
= newTemp(Ity_I32
);
16994 assign(t0
, getIReg(rs
));
16995 *lastn
= mkexpr(t0
);
17000 case 0x0A: { /* MOVZ */
17001 DIP("movz r%u, r%u, r%u", rd
, rs
, rt
);
17006 assign(t1
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpEQ64
,
17007 getIReg(rt
), mkU64(0x0)))));
17008 assign(t2
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpNE64
,
17009 getIReg(rt
), mkU64(0x0)))));
17010 putIReg(rd
, binop(Iop_Add64
, binop(Iop_And64
, getIReg(rs
),
17011 mkexpr(t1
)), binop(Iop_And64
, getIReg(rd
), mkexpr(t2
))));
17013 assign(t1
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, getIReg(rt
),
17015 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, getIReg(rt
),
17017 putIReg(rd
, binop(Iop_Add32
, binop(Iop_And32
, getIReg(rs
),
17018 mkexpr(t1
)), binop(Iop_And32
, getIReg(rd
),
17025 case 0x0B: { /* MOVN */
17026 DIP("movn r%u, r%u, r%u", rd
, rs
, rt
);
17031 assign(t1
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpEQ64
,
17032 getIReg(rt
), mkU64(0x0)))));
17033 assign(t2
, unop(Iop_32Sto64
, unop(Iop_1Sto32
, binop(Iop_CmpNE64
,
17034 getIReg(rt
), mkU64(0x0)))));
17035 putIReg(rd
, binop(Iop_Add64
, binop(Iop_And64
, getIReg(rs
),
17036 mkexpr(t2
)), binop(Iop_And64
, getIReg(rd
),
17039 assign(t1
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, getIReg(rt
),
17041 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, getIReg(rt
),
17043 putIReg(rd
, binop(Iop_Add32
, binop(Iop_And32
, getIReg(rs
),
17044 mkexpr(t2
)), binop(Iop_And32
, getIReg(rd
),
17051 case 0x0C: /* SYSCALL */
17055 putPC(mkU64(guest_PC_curr_instr
+ 4));
17057 putPC(mkU32(guest_PC_curr_instr
+ 4));
17059 dres
->jk_StopHere
= Ijk_Sys_syscall
;
17060 dres
->whatNext
= Dis_StopHere
;
17063 case 0x0D: /* BREAK */
17064 DIP("break 0x%x", trap_code
);
17067 jmp_lit64(dres
, Ijk_SigTRAP
, (guest_PC_curr_instr
+ 4));
17069 jmp_lit32(dres
, Ijk_SigTRAP
, (guest_PC_curr_instr
+ 4));
17071 vassert(dres
->whatNext
== Dis_StopHere
);
17074 case 0x0F: /* SYNC */
17075 DIP("sync 0x%x", sel
);
17076 /* Just ignore it. */
17079 case 0x10: { /* MFHI, CLZ R6 */
17080 if (((instr_index
>> 6) & 0x1f) == 1) { /* CLZ */
17081 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17082 DIP("clz r%u, r%u", rd
, rs
);
17085 IRTemp tmpClz32
= newTemp(Ity_I32
);
17086 IRTemp tmpRs32
= newTemp(Ity_I32
);
17088 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17089 assign(tmpClz32
, unop(Iop_Clz32
, mkexpr(tmpRs32
)));
17090 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClz32
), True
));
17092 t1
= newTemp(Ity_I1
);
17093 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0)));
17094 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17096 unop(Iop_Clz32
, getIReg(rs
))));
17099 ILLEGAL_INSTRUCTON
;
17103 } else if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17104 /* If DSP is present -> DSP ASE MFHI */
17105 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17107 if (0 != retVal
) {
17113 DIP("mfhi r%u", rd
);
17114 putIReg(rd
, getHI());
17119 case 0x11: { /* MTHI, CLO R6 */
17120 if (((instr_index
>> 6) & 0x1f) == 1) { /* CLO */
17121 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17122 DIP("clo r%u, r%u", rd
, rs
);
17125 IRTemp tmpClo32
= newTemp(Ity_I32
);
17126 IRTemp tmpRs32
= newTemp(Ity_I32
);
17127 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17129 t1
= newTemp(Ity_I1
);
17130 assign(t1
, binop(Iop_CmpEQ32
, mkexpr(tmpRs32
), mkU32(0xffffffff)));
17131 assign(tmpClo32
, IRExpr_ITE(mkexpr(t1
),
17133 unop(Iop_Clz32
, unop(Iop_Not32
, mkexpr(tmpRs32
)))));
17135 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClo32
), True
));
17138 t1
= newTemp(Ity_I1
);
17139 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0xffffffff)));
17140 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17143 unop(Iop_Not32
, getIReg(rs
)))));
17146 ILLEGAL_INSTRUCTON
;
17150 } else if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17151 /* If DSP is present -> DSP ASE MTHI */
17152 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17154 if (0 != retVal
) {
17160 DIP("mthi r%u", rs
);
17161 putHI(getIReg(rs
));
17166 case 0x12: { /* MFLO */
17167 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17168 /* If DSP is present -> DSP ASE MFLO */
17169 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17171 if (0 != retVal
) {
17179 DIP("mflo r%u", rd
);
17181 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17182 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17186 putIReg(rd
, getLO());
17190 DIP("dclz r%u, r%u", rd
, rs
);
17191 t1
= newTemp(Ity_I1
);
17192 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0)));
17193 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17195 unop(Iop_Clz64
, getIReg(rs
))));
17203 case 0x13: { /* MTLO */
17204 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17205 /* If DSP is present -> DSP ASE MTLO */
17206 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17208 if (0 != retVal
) {
17216 DIP("mtlo r%u", rs
);
17218 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17219 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17223 putLO(getIReg(rs
));
17227 DIP("dclo r%u, r%u", rd
, rs
);
17228 t1
= newTemp(Ity_I1
);
17229 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
),
17230 mkU64(0xffffffffffffffffULL
)));
17231 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
17233 unop(Iop_Clz64
, unop(Iop_Not64
,
17242 case 0x15: { /* DLSA */
17243 UInt imm2
= (imm
& 0xC0) >> 6;
17245 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) || has_msa
) {
17246 DIP("dlsa r%u, r%u, r%u, imm: 0x%x", rd
, rs
, rt
, imm2
);
17247 putIReg(rd
, binop(Iop_Add64
,
17248 binop(Iop_Shl64
, getIReg(rs
), mkU8(imm2
+ 1)),
17257 case 0x18: { /* MULT */
17258 switch (sa
& 0x3) {
17260 if ((1 <= ac
) && ( 3 >= ac
)) {
17261 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17262 /* If DSP is present -> DSP ASE MULT */
17263 UInt retVal
= disDSPInstr_MIPS_WRK(cins
);
17274 DIP("mult r%u, r%u", rs
, rt
);
17276 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17277 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17281 t2
= newTemp(Ity_I64
);
17283 assign(t2
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
17284 mkNarrowTo32(ty
, getIReg(rt
))));
17286 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17287 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17292 case 2: { /* MUL R6 */
17293 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17294 DIP("mul r%u, r%u, r%u", rs
, rt
, rd
);
17297 putIReg(rd
, unop(Iop_32Sto64
,
17300 unop(Iop_64to32
, getIReg(rs
)),
17301 unop(Iop_64to32
, getIReg(rt
))))));
17303 putIReg(rd
, unop(Iop_64to32
,
17305 getIReg(rs
), getIReg(rt
))));
17308 ILLEGAL_INSTRUCTON
;
17314 case 3: { /* MUH R6 */
17315 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17316 DIP("muh r%u, r%u, r%u", rs
, rt
, rd
);
17319 putIReg(rd
, unop(Iop_32Sto64
,
17322 unop(Iop_64to32
, getIReg(rs
)),
17323 unop(Iop_64to32
, getIReg(rt
))))));
17325 putIReg(rd
, unop(Iop_64HIto32
,
17327 getIReg(rs
), getIReg(rt
))));
17330 ILLEGAL_INSTRUCTON
;
17340 case 0x19: { /* MULTU */
17341 switch (sa
& 0x3) {
17343 if ((1 <= ac
) && ( 3 >= ac
)) {
17344 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
17345 /* If DSP is present -> DSP ASE MULTU */
17346 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
17357 DIP("multu r%u, r%u", rs
, rt
);
17359 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17360 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17364 t2
= newTemp(Ity_I64
);
17366 assign(t2
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
17367 mkNarrowTo32(ty
, getIReg(rt
))));
17369 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17370 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17375 case 2: { /* MULU R6 */
17376 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17377 DIP("mulu r%u, r%u, r%u", rs
, rt
, rd
);
17380 putIReg(rd
, unop(Iop_32Uto64
,
17383 unop(Iop_64to32
, getIReg(rs
)),
17384 unop(Iop_64to32
, getIReg(rt
))))));
17386 putIReg(rd
, unop(Iop_64to32
,
17388 getIReg(rs
), getIReg(rt
))));
17391 ILLEGAL_INSTRUCTON
;
17397 case 3: { /* MUHU R6 */
17398 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17399 DIP("muhu r%u, r%u, r%u", rs
, rt
, rd
);
17402 putIReg(rd
, unop(Iop_32Uto64
,
17405 unop(Iop_64to32
, getIReg(rs
)),
17406 unop(Iop_64to32
, getIReg(rt
))))));
17408 putIReg(rd
, unop(Iop_64HIto32
,
17410 getIReg(rs
), getIReg(rt
))));
17413 ILLEGAL_INSTRUCTON
;
17423 case 0x1A: /* DIV */
17424 switch (sa
& 0x3) {
17426 DIP("div r%u, r%u", rs
, rt
);
17428 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17429 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17434 t2
= newTemp(Ity_I64
);
17436 assign(t2
, binop(Iop_DivModS32to32
,
17437 mkNarrowTo32(ty
, getIReg(rs
)),
17438 mkNarrowTo32(ty
, getIReg(rt
))));
17440 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t2
)), True
));
17441 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t2
)), True
));
17443 t1
= newTemp(Ity_I64
);
17445 assign(t1
, binop(Iop_DivModS32to32
, getIReg(rs
), getIReg(rt
)));
17447 putHI(unop(Iop_64HIto32
, mkexpr(t1
)));
17448 putLO(unop(Iop_64to32
, mkexpr(t1
)));
17454 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17455 DIP("div r%u, r%u, r%u", rs
, rt
, rd
);
17458 putIReg(rd
, unop(Iop_32Sto64
,
17460 unop(Iop_64to32
, getIReg(rs
)),
17461 unop(Iop_64to32
, getIReg(rt
)))));
17463 putIReg(rd
, binop(Iop_DivS32
, getIReg(rs
), getIReg(rt
)));
17472 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17473 DIP("mod r%u, r%u, r%u", rs
, rt
, rd
);
17476 putIReg(rd
, unop(Iop_32Sto64
,
17478 binop(Iop_DivModS32to32
,
17479 unop(Iop_64to32
, getIReg(rs
)),
17480 unop(Iop_64to32
, getIReg(rt
))))));
17482 t1
= newTemp(Ity_I64
);
17484 assign(t1
, binop(Iop_DivModS32to32
, getIReg(rs
), getIReg(rt
)));
17485 putIReg(rd
, unop(Iop_64HIto32
, mkexpr(t1
)));
17496 case 0x1B: /* DIVU */
17497 switch (sa
& 0x3) {
17499 DIP("divu r%u, r%u", rs
, rt
);
17501 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17502 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17507 t1
= newTemp(Ity_I64
);
17509 assign(t1
, binop(Iop_DivModU32to32
,
17510 mkNarrowTo32(ty
, getIReg(rs
)),
17511 mkNarrowTo32(ty
, getIReg(rt
))));
17513 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t1
)), True
));
17514 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t1
)), True
));
17516 t1
= newTemp(Ity_I64
);
17518 assign(t1
, binop(Iop_DivModU32to32
, getIReg(rs
), getIReg(rt
)));
17519 putHI(unop(Iop_64HIto32
, mkexpr(t1
)));
17520 putLO(unop(Iop_64to32
, mkexpr(t1
)));
17526 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17527 DIP("divu r%u, r%u, r%u", rs
, rt
, rd
);
17530 putIReg(rd
, unop(Iop_32Sto64
,
17532 unop(Iop_64to32
, getIReg(rs
)),
17533 unop(Iop_64to32
, getIReg(rt
)))));
17535 putIReg(rd
, binop(Iop_DivU32
, getIReg(rs
), getIReg(rt
)));
17546 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17547 DIP("modu r%u, r%u, r%u", rs
, rt
, rd
);
17550 putIReg(rd
, unop(Iop_32Uto64
,
17552 binop(Iop_DivModU32to32
,
17553 unop(Iop_64to32
, getIReg(rs
)),
17554 unop(Iop_64to32
, getIReg(rt
))))));
17556 t1
= newTemp(Ity_I64
);
17558 assign(t1
, binop(Iop_DivModU32to32
, getIReg(rs
), getIReg(rt
)));
17559 putIReg(rd
, unop(Iop_64HIto32
, mkexpr(t1
)));
17570 case 0x1C: /* Doubleword Multiply - DMULT; MIPS64 */
17573 DIP("dmult r%u, r%u", rs
, rt
);
17575 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17576 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17580 t0
= newTemp(Ity_I128
);
17582 assign(t0
, binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
)));
17584 putHI(unop(Iop_128HIto64
, mkexpr(t0
)));
17585 putLO(unop(Iop_128to64
, mkexpr(t0
)));
17589 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17590 DIP("dmul r%u, r%u, r%u", rd
, rs
, rt
);
17591 putIReg(rd
, unop(Iop_128to64
,
17592 binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
))));
17600 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17601 DIP("dmuh r%u, r%u, r%u", rd
, rs
, rt
);
17602 putIReg(rd
, unop(Iop_128HIto64
,
17603 binop(Iop_MullS64
, getIReg(rs
), getIReg(rt
))));
17613 case 0x1D: /* Doubleword Multiply Unsigned - DMULTU; MIPS64 */
17616 DIP("dmultu r%u, r%u", rs
, rt
);
17618 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17619 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17623 t0
= newTemp(Ity_I128
);
17625 assign(t0
, binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
)));
17627 putHI(unop(Iop_128HIto64
, mkexpr(t0
)));
17628 putLO(unop(Iop_128to64
, mkexpr(t0
)));
17631 case 2: /* DMULU */
17632 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17633 DIP("dmulu r%u, r%u, r%u", rd
, rs
, rt
);
17634 putIReg(rd
, unop(Iop_128to64
,
17635 binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
))));
17642 case 3: /* DMUHU */
17643 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17644 DIP("dmuhu r%u, r%u, r%u", rd
, rs
, rt
);
17645 putIReg(rd
, unop(Iop_128HIto64
,
17646 binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
))));
17656 case 0x1E: /* Doubleword Divide DDIV; MIPS64 */
17659 DIP("ddiv r%u, r%u", rs
, rt
);
17661 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17662 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17666 t1
= newTemp(Ity_I128
);
17668 assign(t1
, binop(Iop_DivModS64to64
, getIReg(rs
), getIReg(rt
)));
17670 putHI(unop(Iop_128HIto64
, mkexpr(t1
)));
17671 putLO(unop(Iop_128to64
, mkexpr(t1
)));
17674 case 2: /* DDIV r6 */
17675 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17676 DIP("ddiv r%u, r%u, r%u", rs
, rt
, rd
);
17677 putIReg(rd
, unop(Iop_128to64
,
17678 binop(Iop_DivModS64to64
,
17679 getIReg(rs
), getIReg(rt
))));
17686 case 3: /* DMOD r6 */
17687 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17688 DIP("dmod r%u, r%u, r%u", rs
, rt
, rd
);
17689 t2
= newTemp(Ity_I128
);
17690 assign(t2
, binop(Iop_DivModS64to64
, getIReg(rs
), getIReg(rt
)));
17691 putIReg(rd
, unop(Iop_128HIto64
, mkexpr(t2
)));
17701 case 0x1F: /* Doubleword Divide Unsigned DDIVU; MIPS64 check this */
17704 DIP("ddivu r%u, r%u", rs
, rt
);
17706 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) &&
17707 !VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
)) {
17711 t1
= newTemp(Ity_I128
);
17713 assign(t1
, binop(Iop_DivModU64to64
, getIReg(rs
), getIReg(rt
)));
17715 putHI(unop(Iop_128HIto64
, mkexpr(t1
)));
17716 putLO(unop(Iop_128to64
, mkexpr(t1
)));
17720 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17721 DIP("ddivu r%u, r%u, r%u", rs
, rt
, rd
);
17722 putIReg(rd
, unop(Iop_128to64
, binop(Iop_DivModU64to64
,
17723 getIReg(rs
), getIReg(rt
))));
17731 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
17732 DIP("dmodu r%u, r%u, r%u", rs
, rt
, rd
);
17733 putIReg(rd
, unop(Iop_128HIto64
, binop(Iop_DivModU64to64
,
17734 getIReg(rs
), getIReg(rt
))));
17744 case 0x20: { /* ADD */
17745 DIP("add r%u, r%u, r%u", rd
, rs
, rt
);
17746 IRTemp tmpRs32
= newTemp(Ity_I32
);
17747 IRTemp tmpRt32
= newTemp(Ity_I32
);
17749 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17750 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
17752 t0
= newTemp(Ity_I32
);
17753 t1
= newTemp(Ity_I32
);
17754 t2
= newTemp(Ity_I32
);
17755 t3
= newTemp(Ity_I32
);
17756 t4
= newTemp(Ity_I32
);
17757 /* dst = src0 + src1
17758 if (sign(src0 ) != sign(src1 ))
17760 if (sign(dst) == sign(src0 ))
17762 we have overflow! */
17764 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
17765 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
17766 assign(t2
, unop(Iop_1Uto32
,
17768 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
17769 mkU32(0x80000000))));
17771 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
17772 assign(t4
, unop(Iop_1Uto32
,
17774 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
17775 mkU32(0x80000000))));
17777 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
,
17778 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
17781 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
17782 IRConst_U32(guest_PC_curr_instr
+ 4),
17785 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
17789 case 0x21: /* ADDU */
17790 DIP("addu r%u, r%u, r%u", rd
, rs
, rt
);
17793 ALU_PATTERN64(Iop_Add32
);
17795 ALU_PATTERN(Iop_Add32
);
17800 case 0x22: { /* SUB */
17801 DIP("sub r%u, r%u, r%u", rd
, rs
, rt
);
17802 IRTemp tmpRs32
= newTemp(Ity_I32
);
17803 IRTemp tmpRt32
= newTemp(Ity_I32
);
17805 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
17806 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
17807 t0
= newTemp(Ity_I32
);
17808 t1
= newTemp(Ity_I32
);
17809 t2
= newTemp(Ity_I32
);
17810 t3
= newTemp(Ity_I32
);
17811 t4
= newTemp(Ity_I32
);
17812 t5
= newTemp(Ity_I32
);
17813 /* dst = src0 + (-1 * src1)
17814 if(sign(src0 ) != sign((-1 * src1) ))
17816 if(sign(dst) == sign(src0 ))
17818 we have overflow! */
17820 assign(t5
, binop(Iop_Mul32
, mkexpr(tmpRt32
), mkU32(-1)));
17821 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(t5
)));
17822 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(t5
)));
17823 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, binop(Iop_And32
,
17824 mkexpr(t1
), mkU32(0x80000000)), mkU32(0x80000000))));
17826 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
17827 assign(t4
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, binop(Iop_And32
,
17828 mkexpr(t3
), mkU32(0x80000000)), mkU32(0x80000000))));
17830 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, binop(Iop_Or32
, mkexpr(t2
),
17831 mkexpr(t4
)), mkU32(0)), Ijk_SigFPE_IntOvf
,
17832 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
17833 IRConst_U32(guest_PC_curr_instr
+ 4),
17836 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
17840 case 0x23: /* SUBU */
17841 DIP("subu r%u, r%u, r%u", rd
, rs
, rt
);
17844 ALU_PATTERN64(Iop_Sub32
);
17846 ALU_PATTERN(Iop_Sub32
);
17851 case 0x24: /* AND */
17852 DIP("and r%u, r%u, r%u", rd
, rs
, rt
);
17855 ALU_PATTERN(Iop_And64
);
17857 ALU_PATTERN(Iop_And32
);
17862 case 0x25: /* OR */
17863 DIP("or r%u, r%u, r%u", rd
, rs
, rt
);
17866 ALU_PATTERN(Iop_Or64
);
17868 ALU_PATTERN(Iop_Or32
);
17873 case 0x26: /* XOR */
17874 DIP("xor r%u, r%u, r%u", rd
, rs
, rt
);
17877 ALU_PATTERN(Iop_Xor64
);
17879 ALU_PATTERN(Iop_Xor32
);
17884 case 0x27: /* NOR */
17885 DIP("nor r%u, r%u, r%u", rd
, rs
, rt
);
17888 putIReg(rd
, unop(Iop_Not64
, binop(Iop_Or64
, getIReg(rs
),
17891 putIReg(rd
, unop(Iop_Not32
, binop(Iop_Or32
, getIReg(rs
),
17896 case 0x2A: /* SLT */
17897 DIP("slt r%u, r%u, r%u", rd
, rs
, rt
);
17900 putIReg(rd
, unop(Iop_1Uto64
, binop(Iop_CmpLT64S
, getIReg(rs
),
17903 putIReg(rd
, unop(Iop_1Uto32
, binop(Iop_CmpLT32S
, getIReg(rs
),
17908 case 0x2B: /* SLTU */
17909 DIP("sltu r%u, r%u, r%u", rd
, rs
, rt
);
17912 putIReg(rd
, unop(Iop_1Uto64
, binop(Iop_CmpLT64U
, getIReg(rs
),
17915 putIReg(rd
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, getIReg(rs
),
17920 case 0x2C: { /* Doubleword Add - DADD; MIPS64 */
17921 DIP("dadd r%u, r%u, r%u", rd
, rs
, rt
);
17922 IRTemp tmpRs64
= newTemp(Ity_I64
);
17923 IRTemp tmpRt64
= newTemp(Ity_I64
);
17925 assign(tmpRs64
, getIReg(rs
));
17926 assign(tmpRt64
, getIReg(rt
));
17928 t0
= newTemp(Ity_I64
);
17929 t1
= newTemp(Ity_I64
);
17930 t2
= newTemp(Ity_I64
);
17931 t3
= newTemp(Ity_I64
);
17932 t4
= newTemp(Ity_I64
);
17933 /* dst = src0 + src1
17934 if(sign(src0 ) != sign(src1 ))
17936 if(sign(dst) == sign(src0 ))
17938 we have overflow! */
17940 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
), mkexpr(tmpRt64
)));
17941 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
), mkexpr(tmpRt64
)));
17942 assign(t2
, unop(Iop_1Uto64
,
17944 binop(Iop_And64
, mkexpr(t1
),
17945 mkU64(0x8000000000000000ULL
)),
17946 mkU64(0x8000000000000000ULL
))));
17948 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
17949 assign(t4
, unop(Iop_1Uto64
,
17951 binop(Iop_And64
, mkexpr(t3
),
17952 mkU64(0x8000000000000000ULL
)),
17953 mkU64(0x8000000000000000ULL
))));
17955 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
,
17956 binop(Iop_Or64
, mkexpr(t2
), mkexpr(t4
)),
17959 IRConst_U64(guest_PC_curr_instr
+ 4),
17962 putIReg(rd
, mkexpr(t0
));
17966 case 0x2D: /* Doubleword Add Unsigned - DADDU; MIPS64 */
17967 DIP("daddu r%u, r%u, r%u", rd
, rs
, rt
);
17968 ALU_PATTERN(Iop_Add64
);
17971 case 0x2E: { /* Doubleword Subtract - DSUB; MIPS64 */
17972 DIP("dsub r%u, r%u, r%u", rd
, rs
, rt
);
17973 IRTemp tmpRs64
= newTemp(Ity_I64
);
17974 IRTemp tmpRt64
= newTemp(Ity_I64
);
17976 assign(tmpRs64
, getIReg(rs
));
17977 assign(tmpRt64
, getIReg(rt
));
17978 t0
= newTemp(Ity_I64
);
17979 t1
= newTemp(Ity_I64
);
17980 t2
= newTemp(Ity_I64
);
17981 t3
= newTemp(Ity_I64
);
17982 t4
= newTemp(Ity_I64
);
17983 t5
= newTemp(Ity_I64
);
17984 /* dst = src0 + (-1 * src1)
17985 if(sign(src0 ) != sign((-1 * src1) ))
17987 if(sign(dst) == sign(src0 ))
17989 we have overflow! */
17991 assign(t5
, binop(Iop_Mul64
,
17993 mkU64(0xffffffffffffffffULL
)));
17994 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
), mkexpr(t5
)));
17995 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
), mkexpr(t5
)));
17996 assign(t2
, unop(Iop_1Sto64
,
18000 mkU64(0x8000000000000000ULL
)),
18001 mkU64(0x8000000000000000ULL
))));
18003 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
18004 assign(t4
, unop(Iop_1Sto64
,
18008 mkU64(0x8000000000000000ULL
)),
18009 mkU64(0x8000000000000000ULL
))));
18011 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, binop(Iop_Or64
, mkexpr(t2
),
18012 mkexpr(t4
)), mkU64(0)), Ijk_SigFPE_IntOvf
,
18013 IRConst_U64(guest_PC_curr_instr
+ 4),
18016 putIReg(rd
, binop(Iop_Sub64
, getIReg(rs
), getIReg(rt
)));
18020 case 0x2F: /* Doubleword Subtract Unsigned - DSUBU; MIPS64 */
18021 DIP("dsub r%u, r%u,r%u", rd
, rt
, rt
);
18022 ALU_PATTERN(Iop_Sub64
);
18025 case 0x30: { /* TGE */
18026 DIP("tge r%u, r%u %u", rs
, rt
, trap_code
);
18029 if (trap_code
== 7)
18030 stmt (IRStmt_Exit (unop (Iop_Not1
,
18031 binop (Iop_CmpLT64S
,
18035 IRConst_U64(guest_PC_curr_instr
+ 4),
18037 else if (trap_code
== 6)
18038 stmt (IRStmt_Exit (unop (Iop_Not1
,
18039 binop (Iop_CmpLT64S
,
18043 IRConst_U64(guest_PC_curr_instr
+ 4),
18046 stmt (IRStmt_Exit (unop (Iop_Not1
,
18047 binop (Iop_CmpLT64S
,
18051 IRConst_U64(guest_PC_curr_instr
+ 4),
18054 if (trap_code
== 7)
18055 stmt (IRStmt_Exit (unop (Iop_Not1
,
18056 binop (Iop_CmpLT32S
,
18060 IRConst_U32(guest_PC_curr_instr
+ 4),
18062 else if (trap_code
== 6)
18063 stmt (IRStmt_Exit (unop (Iop_Not1
,
18064 binop (Iop_CmpLT32S
,
18068 IRConst_U32(guest_PC_curr_instr
+ 4),
18071 stmt (IRStmt_Exit (unop (Iop_Not1
,
18072 binop (Iop_CmpLT32S
,
18076 IRConst_U32(guest_PC_curr_instr
+ 4),
18083 case 0x31: { /* TGEU */
18084 DIP("tgeu r%u, r%u %u", rs
, rt
, trap_code
);
18087 if (trap_code
== 7)
18088 stmt (IRStmt_Exit (unop (Iop_Not1
,
18089 binop (Iop_CmpLT64U
,
18093 IRConst_U64(guest_PC_curr_instr
+ 4),
18095 else if (trap_code
== 6)
18096 stmt (IRStmt_Exit (unop (Iop_Not1
,
18097 binop (Iop_CmpLT64U
,
18101 IRConst_U64(guest_PC_curr_instr
+ 4),
18104 stmt (IRStmt_Exit (unop (Iop_Not1
,
18105 binop (Iop_CmpLT64U
,
18109 IRConst_U64(guest_PC_curr_instr
+ 4),
18112 if (trap_code
== 7)
18113 stmt (IRStmt_Exit (unop (Iop_Not1
,
18114 binop (Iop_CmpLT32U
,
18118 IRConst_U32(guest_PC_curr_instr
+ 4),
18120 else if (trap_code
== 6)
18121 stmt (IRStmt_Exit (unop (Iop_Not1
,
18122 binop (Iop_CmpLT32U
,
18126 IRConst_U32(guest_PC_curr_instr
+ 4),
18129 stmt (IRStmt_Exit (unop (Iop_Not1
,
18130 binop (Iop_CmpLT32U
,
18134 IRConst_U32(guest_PC_curr_instr
+ 4),
18141 case 0x32: { /* TLT */
18142 DIP("tlt r%u, r%u %u", rs
, rt
, trap_code
);
18145 if (trap_code
== 7)
18146 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18147 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18148 IRConst_U64(guest_PC_curr_instr
+ 4),
18150 else if (trap_code
== 6)
18151 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18152 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18153 IRConst_U64(guest_PC_curr_instr
+ 4),
18156 stmt(IRStmt_Exit(binop(Iop_CmpLT64S
, getIReg(rs
),
18157 getIReg(rt
)), Ijk_SigTRAP
,
18158 IRConst_U64(guest_PC_curr_instr
+ 4),
18161 if (trap_code
== 7)
18162 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18163 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18164 IRConst_U32(guest_PC_curr_instr
+ 4),
18166 else if (trap_code
== 6)
18167 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18168 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18169 IRConst_U32(guest_PC_curr_instr
+ 4),
18172 stmt(IRStmt_Exit(binop(Iop_CmpLT32S
, getIReg(rs
),
18173 getIReg(rt
)), Ijk_SigTRAP
,
18174 IRConst_U32(guest_PC_curr_instr
+ 4),
18181 case 0x33: { /* TLTU */
18182 DIP("tltu r%u, r%u %u", rs
, rt
, trap_code
);
18185 if (trap_code
== 7)
18186 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18187 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18188 IRConst_U64(guest_PC_curr_instr
+ 4),
18190 else if (trap_code
== 6)
18191 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18192 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18193 IRConst_U64(guest_PC_curr_instr
+ 4),
18196 stmt(IRStmt_Exit(binop(Iop_CmpLT64U
, getIReg(rs
),
18197 getIReg(rt
)), Ijk_SigTRAP
,
18198 IRConst_U64(guest_PC_curr_instr
+ 4),
18201 if (trap_code
== 7)
18202 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18203 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18204 IRConst_U32(guest_PC_curr_instr
+ 4),
18206 else if (trap_code
== 6)
18207 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18208 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18209 IRConst_U32(guest_PC_curr_instr
+ 4),
18212 stmt(IRStmt_Exit(binop(Iop_CmpLT32U
, getIReg(rs
),
18213 getIReg (rt
)), Ijk_SigTRAP
,
18214 IRConst_U32(guest_PC_curr_instr
+ 4),
18221 case 0x34: { /* TEQ */
18222 DIP("teq r%u, r%u, %u", rs
, rt
, trap_code
);
18225 if (trap_code
== 7)
18226 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18227 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18228 IRConst_U64(guest_PC_curr_instr
+ 4),
18230 else if (trap_code
== 6)
18231 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18232 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18233 IRConst_U64(guest_PC_curr_instr
+ 4),
18236 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, getIReg(rs
),
18237 getIReg(rt
)), Ijk_SigTRAP
,
18238 IRConst_U64(guest_PC_curr_instr
+ 4),
18241 if (trap_code
== 7)
18242 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18243 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18244 IRConst_U32(guest_PC_curr_instr
+ 4),
18246 else if (trap_code
== 6)
18247 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18248 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18249 IRConst_U32(guest_PC_curr_instr
+ 4),
18252 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, getIReg(rs
),
18253 getIReg(rt
)), Ijk_SigTRAP
,
18254 IRConst_U32(guest_PC_curr_instr
+ 4),
18261 case 0x35: { /* SELEQZ */
18262 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
18263 DIP("seleqz r%u, r%u, r%u", rd
, rs
, rt
);
18266 putIReg(rd
, binop(Iop_And64
,
18268 unop(Iop_CmpwNEZ64
, getIReg(rt
))),
18271 putIReg(rd
, binop(Iop_And32
,
18273 unop(Iop_CmpwNEZ32
, getIReg(rt
))),
18277 ILLEGAL_INSTRUCTON
;
18283 case 0x36: { /* TNE */
18284 DIP("tne r%u, r%u %u", rs
, rt
, trap_code
);
18287 if (trap_code
== 7)
18288 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18289 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18290 IRConst_U64(guest_PC_curr_instr
+ 4),
18292 else if (trap_code
== 6)
18293 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18294 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18295 IRConst_U64(guest_PC_curr_instr
+ 4),
18298 stmt(IRStmt_Exit(binop(Iop_CmpNE64
, getIReg(rs
),
18299 getIReg(rt
)), Ijk_SigTRAP
,
18300 IRConst_U64(guest_PC_curr_instr
+ 4),
18303 if (trap_code
== 7)
18304 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18305 getIReg(rt
)), Ijk_SigFPE_IntDiv
,
18306 IRConst_U32(guest_PC_curr_instr
+ 4),
18308 else if (trap_code
== 6)
18309 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18310 getIReg(rt
)), Ijk_SigFPE_IntOvf
,
18311 IRConst_U32(guest_PC_curr_instr
+ 4),
18314 stmt(IRStmt_Exit(binop(Iop_CmpNE32
, getIReg(rs
),
18315 getIReg(rt
)), Ijk_SigTRAP
,
18316 IRConst_U32(guest_PC_curr_instr
+ 4),
18323 case 0x37: { /* SELNEZ */
18324 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
18325 DIP("selnez r%u, r%u, r%u", rd
, rs
, rt
);
18328 putIReg(rd
, binop(Iop_And64
,
18329 unop(Iop_CmpwNEZ64
, getIReg(rt
)), getIReg(rs
)));
18331 putIReg(rd
, binop(Iop_And32
,
18332 unop(Iop_CmpwNEZ32
, getIReg(rt
)), getIReg(rs
)));
18335 ILLEGAL_INSTRUCTON
;
18343 case 0x17: /* DSLLV, DROTRV:DSRLV, DSRAV */
18346 case 0x3B: /* DSLL, DROTL:DSRL, DSRA */
18349 case 0x3F: /* DSLL32, DROTR32:DSRL32, DSRA32 */
18350 if (dis_instr_shrt(cins
))
18362 static UInt
disInstr_MIPS_WRK_Special2(UInt cins
, const VexArchInfo
* archinfo
,
18363 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
18364 IRStmt
** bstmt
, IRExpr
** lastn
)
18366 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
;
18367 UInt rs
, rt
, rd
, function
;
18368 /* Additional variables for instruction fields in DSP ASE insructions */
18374 function
= get_function(cins
);
18375 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
18377 ac
= get_acNo(cins
);
18379 switch (function
) {
18380 /* Cavium Specific instructions */
18383 case 0x33: /* DMUL, CINS , CINS32 */
18386 case 0x2B: /* EXT, EXT32, SNE */
18388 /* CVM Compare Instructions */
18391 case 0x2F: /* SEQ, SEQI, SNEI */
18393 /* CPU Load, Store, Memory, and Control Instructions */
18395 case 0x19: /* SAA, SAAD */
18396 case 0x1F: /* LAA, LAAD, LAI, LAID */
18399 case 0x2D: /* BADDU, POP, DPOP */
18400 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
18401 if (dis_instr_CVM(cins
))
18411 case 0x02: { /* MUL */
18412 DIP("mul r%u, r%u, r%u", rd
, rs
, rt
);
18415 IRTemp tmpRs32
= newTemp(Ity_I32
);
18416 IRTemp tmpRt32
= newTemp(Ity_I32
);
18417 IRTemp tmpRes
= newTemp(Ity_I32
);
18419 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18420 assign(tmpRt32
, mkNarrowTo32(ty
, getIReg(rt
)));
18421 assign(tmpRes
, binop(Iop_Mul32
,
18422 mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
18423 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpRes
), True
));
18425 putIReg(rd
, binop(Iop_Mul32
, getIReg(rs
), getIReg(rt
)));
18430 case 0x00: { /* MADD */
18432 DIP("madd r%u, r%u", rs
, rt
);
18433 t1
= newTemp(Ity_I32
);
18434 t2
= newTemp(Ity_I32
);
18435 t3
= newTemp(Ity_I64
);
18436 t4
= newTemp(Ity_I64
);
18437 t5
= newTemp(Ity_I64
);
18438 t6
= newTemp(Ity_I32
);
18440 assign(t1
, mkNarrowTo32(ty
, getHI()));
18441 assign(t2
, mkNarrowTo32(ty
, getLO()));
18443 assign(t3
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
18444 mkNarrowTo32(ty
, getIReg(rt
))));
18446 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18447 assign(t5
, binop(Iop_Add64
, mkexpr(t3
), mkexpr(t4
)));
18449 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18450 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18452 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18453 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18454 /* If DSP is present -> DSP ASE MADD */
18455 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18457 if (0 != retVal
) {
18466 DIP("madd r%u, r%u", rs
, rt
);
18467 t1
= newTemp(Ity_I32
);
18468 t2
= newTemp(Ity_I32
);
18469 t3
= newTemp(Ity_I64
);
18470 t4
= newTemp(Ity_I32
);
18471 t5
= newTemp(Ity_I32
);
18472 t6
= newTemp(Ity_I32
);
18474 assign(t1
, getHI());
18475 assign(t2
, getLO());
18477 assign(t3
, binop(Iop_MullS32
, getIReg(rs
), getIReg(rt
)));
18479 assign(t4
, binop(Iop_Add32
, mkexpr(t2
), unop(Iop_64to32
,
18482 assign(t5
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, mkexpr(t4
),
18483 unop(Iop_64to32
, mkexpr(t3
)))));
18484 assign(t6
, binop(Iop_Add32
, mkexpr(t5
), mkexpr(t1
)));
18486 putHI(binop(Iop_Add32
, mkexpr(t6
), unop(Iop_64HIto32
,
18496 case 0x01: { /* MADDU */
18498 DIP("maddu r%u, r%u", rs
, rt
);
18499 t1
= newTemp(Ity_I32
);
18500 t2
= newTemp(Ity_I32
);
18501 t3
= newTemp(Ity_I64
);
18502 t4
= newTemp(Ity_I64
);
18503 t5
= newTemp(Ity_I64
);
18504 t6
= newTemp(Ity_I32
);
18506 assign(t1
, mkNarrowTo32(ty
, getHI()));
18507 assign(t2
, mkNarrowTo32(ty
, getLO()));
18509 assign(t3
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
18510 mkNarrowTo32(ty
, getIReg(rt
))));
18512 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18513 assign(t5
, binop(Iop_Add64
, mkexpr(t3
), mkexpr(t4
)));
18515 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18516 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18518 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18519 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18520 /* If DSP is present -> DSP ASE MADDU */
18521 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18523 if (0 != retVal
) {
18532 DIP("maddu r%u, r%u", rs
, rt
);
18533 t1
= newTemp(Ity_I32
);
18534 t2
= newTemp(Ity_I32
);
18535 t3
= newTemp(Ity_I64
);
18536 t4
= newTemp(Ity_I32
);
18537 t5
= newTemp(Ity_I32
);
18538 t6
= newTemp(Ity_I32
);
18540 assign(t1
, getHI());
18541 assign(t2
, getLO());
18543 assign(t3
, binop(Iop_MullU32
, getIReg(rs
), getIReg(rt
)));
18545 assign(t4
, binop(Iop_Add32
, mkexpr(t2
), unop(Iop_64to32
,
18547 assign(t5
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, mkexpr(t4
),
18548 unop(Iop_64to32
, mkexpr(t3
)))));
18549 assign(t6
, binop(Iop_Add32
, mkexpr(t5
), mkexpr(t1
)));
18551 putHI(binop(Iop_Add32
, mkexpr(t6
), unop(Iop_64HIto32
,
18561 case 0x04: { /* MSUB */
18563 DIP("msub r%u, r%u", rs
, rt
);
18564 t1
= newTemp(Ity_I32
);
18565 t2
= newTemp(Ity_I32
);
18566 t3
= newTemp(Ity_I64
);
18567 t4
= newTemp(Ity_I64
);
18568 t5
= newTemp(Ity_I64
);
18569 t6
= newTemp(Ity_I32
);
18571 assign(t1
, mkNarrowTo32(ty
, getHI()));
18572 assign(t2
, mkNarrowTo32(ty
, getLO()));
18574 assign(t3
, binop(Iop_MullS32
, mkNarrowTo32(ty
, getIReg(rs
)),
18575 mkNarrowTo32(ty
, getIReg(rt
))));
18577 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18578 assign(t5
, binop(Iop_Sub64
, mkexpr(t4
), mkexpr(t3
)));
18580 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18581 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18583 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18584 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18585 /* If DSP is present -> DSP ASE MSUB */
18586 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18588 if (0 != retVal
) {
18597 DIP("msub r%u, r%u", rs
, rt
);
18598 t1
= newTemp(Ity_I32
);
18599 t2
= newTemp(Ity_I32
);
18600 t3
= newTemp(Ity_I64
);
18601 t4
= newTemp(Ity_I32
);
18602 t5
= newTemp(Ity_I1
);
18603 t6
= newTemp(Ity_I32
);
18605 assign(t1
, getHI());
18606 assign(t2
, getLO());
18608 assign(t3
, binop(Iop_MullS32
, getIReg(rs
), getIReg(rt
)));
18609 assign(t4
, unop(Iop_64to32
, mkexpr(t3
))); /* new lo */
18611 /* if lo<lo(mul) hi = hi - 1 */
18612 assign(t5
, binop(Iop_CmpLT32U
,
18616 assign(t6
, IRExpr_ITE(mkexpr(t5
),
18617 binop(Iop_Sub32
, mkexpr(t1
), mkU32(0x1)),
18620 putHI(binop(Iop_Sub32
, mkexpr(t6
), unop(Iop_64HIto32
,
18622 putLO(binop(Iop_Sub32
, mkexpr(t2
), mkexpr(t4
)));
18630 case 0x05: { /* MSUBU */
18632 DIP("msubu r%u, r%u", rs
, rt
);
18633 t1
= newTemp(Ity_I32
);
18634 t2
= newTemp(Ity_I32
);
18635 t3
= newTemp(Ity_I64
);
18636 t4
= newTemp(Ity_I64
);
18637 t5
= newTemp(Ity_I64
);
18638 t6
= newTemp(Ity_I32
);
18640 assign(t1
, mkNarrowTo32(ty
, getHI()));
18641 assign(t2
, mkNarrowTo32(ty
, getLO()));
18643 assign(t3
, binop(Iop_MullU32
, mkNarrowTo32(ty
, getIReg(rs
)),
18644 mkNarrowTo32(ty
, getIReg(rt
))));
18646 assign(t4
, binop(Iop_32HLto64
, mkexpr(t1
), mkexpr(t2
)));
18647 assign(t5
, binop(Iop_Sub64
, mkexpr(t4
), mkexpr(t3
)));
18649 putHI(mkWidenFrom32(ty
, unop(Iop_64HIto32
, mkexpr(t5
)), True
));
18650 putLO(mkWidenFrom32(ty
, unop(Iop_64to32
, mkexpr(t5
)), True
));
18652 if ( (1 <= ac
) && ( 3 >= ac
) ) {
18653 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
18654 /* If DSP is present -> DSP ASE MSUBU */
18655 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
18657 if (0 != retVal
) {
18666 DIP("msubu r%u, r%u", rs
, rt
);
18667 t1
= newTemp(Ity_I32
);
18668 t2
= newTemp(Ity_I32
);
18669 t3
= newTemp(Ity_I64
);
18670 t4
= newTemp(Ity_I32
);
18671 t5
= newTemp(Ity_I1
);
18672 t6
= newTemp(Ity_I32
);
18674 assign(t1
, getHI());
18675 assign(t2
, getLO());
18677 assign(t3
, binop(Iop_MullU32
, getIReg(rs
), getIReg(rt
)));
18678 assign(t4
, unop(Iop_64to32
, mkexpr(t3
))); /* new lo */
18680 /* if lo<lo(mul) hi = hi - 1 */
18681 assign(t5
, binop(Iop_CmpLT32U
,
18685 assign(t6
, IRExpr_ITE(mkexpr(t5
),
18691 putHI(binop(Iop_Sub32
, mkexpr(t6
), unop(Iop_64HIto32
,
18693 putLO(binop(Iop_Sub32
, mkexpr(t2
), mkexpr(t4
)));
18701 case 0x6: /* dmul MIPS64 - Netlogic */
18702 DIP("dmul r%u, r%u, r%u", rd
, rs
, rt
);
18703 t0
= newTemp(Ity_I128
);
18705 assign(t0
, binop(Iop_MullU64
, getIReg(rs
), getIReg(rt
)));
18707 putIReg(rd
, unop(Iop_128to64
, mkexpr(t0
)));
18710 case 0x10: /* LDADDW - Swap Word - Netlogic */
18711 DIP("ldaddw r%u, r%u", rt
, rs
);
18712 t0
= newTemp(Ity_I32
);
18713 t1
= newTemp(Ity_I32
);
18714 t2
= newTemp(Ity_I32
);
18715 t3
= newTemp(Ity_I64
);
18716 t4
= newTemp(Ity_I32
);
18717 t5
= newTemp(Ity_I32
);
18718 t6
= newTemp(Ity_I32
);
18721 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
18723 /* GPR[rt] = memory[base]; */
18724 assign(t1
, load(Ity_I32
, getIReg(rs
)));
18725 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
18727 /* memory[base] = memory[base] + v; */
18728 store(getIReg(rs
), binop(Iop_Add32
, mkexpr(t0
), mkexpr(t1
)));
18731 case 0x12: /* LDADDD - Swap Word - Netlogic */
18732 DIP("ldaddw r%u, r%u", rt
, rs
);
18733 t0
= newTemp(Ity_I64
);
18734 t1
= newTemp(Ity_I64
);
18737 assign(t0
, getIReg(rt
));
18739 /* GPR[rt] = memory[base]; */
18740 assign(t1
, load(Ity_I64
, getIReg(rs
)));
18741 putIReg(rt
, mkexpr(t1
));
18743 /* memory[base] = memory[base] + v; */
18744 store(getIReg(rs
), binop(Iop_Add64
, mkexpr(t0
), mkexpr(t1
)));
18747 case 0x14: /* SWAPW - Swap Word - Netlogic */
18748 DIP("swapw r%u, r%u", rt
, rs
);
18749 t0
= newTemp(Ity_I32
);
18750 t1
= newTemp(Ity_I32
);
18751 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
18752 assign(t1
, load(Ity_I32
, getIReg(rs
)));
18753 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
18754 store(getIReg(rs
), mkexpr(t0
));
18757 case 0x16: /* SWAPD - Swap Double - Netlogic */
18758 DIP("swapw r%u, r%u", rt
, rs
);
18759 t0
= newTemp(Ity_I64
);
18760 t1
= newTemp(Ity_I64
);
18761 assign(t0
, getIReg(rt
));
18762 assign(t1
, load(Ity_I64
, getIReg(rs
)));
18763 putIReg(rt
, mkexpr(t1
));
18764 store(getIReg(rs
), mkexpr(t0
));
18767 case 0x20: { /* CLZ */
18768 DIP("clz r%u, r%u", rd
, rs
);
18771 IRTemp tmpClz32
= newTemp(Ity_I32
);
18772 IRTemp tmpRs32
= newTemp(Ity_I32
);
18774 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18775 assign(tmpClz32
, unop(Iop_Clz32
, mkexpr(tmpRs32
)));
18776 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClz32
), True
));
18778 t1
= newTemp(Ity_I1
);
18779 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0)));
18780 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18782 unop(Iop_Clz32
, getIReg(rs
))));
18788 case 0x21: { /* CLO */
18789 DIP("clo r%u, r%u", rd
, rs
);
18792 IRTemp tmpClo32
= newTemp(Ity_I32
);
18793 IRTemp tmpRs32
= newTemp(Ity_I32
);
18794 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
18796 t1
= newTemp(Ity_I1
);
18797 assign(t1
, binop(Iop_CmpEQ32
, mkexpr(tmpRs32
), mkU32(0xffffffff)));
18798 assign(tmpClo32
, IRExpr_ITE(mkexpr(t1
),
18800 unop(Iop_Clz32
, unop(Iop_Not32
, mkexpr(tmpRs32
)))));
18802 putIReg(rd
, mkWidenFrom32(ty
, mkexpr(tmpClo32
), True
));
18805 t1
= newTemp(Ity_I1
);
18806 assign(t1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0xffffffff)));
18807 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18810 unop(Iop_Not32
, getIReg(rs
)))));
18815 case 0x24: /* Count Leading Zeros in Doubleword - DCLZ; MIPS64 */
18816 DIP("dclz r%u, r%u", rd
, rs
);
18817 t1
= newTemp(Ity_I1
);
18818 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0)));
18819 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18821 unop(Iop_Clz64
, getIReg(rs
))));
18824 case 0x25: /* Count Leading Ones in Doubleword - DCLO; MIPS64 */
18825 DIP("dclo r%u, r%u", rd
, rs
);
18826 t1
= newTemp(Ity_I1
);
18827 assign(t1
, binop(Iop_CmpEQ64
, getIReg(rs
),
18828 mkU64(0xffffffffffffffffULL
)));
18829 putIReg(rd
, IRExpr_ITE(mkexpr(t1
),
18831 unop(Iop_Clz64
, unop(Iop_Not64
,
18842 static UInt
disInstr_MIPS_WRK_Special3(UInt cins
, const VexArchInfo
* archinfo
,
18843 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
18844 IRStmt
** bstmt
, IRExpr
** lastn
)
18847 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
;
18848 UInt rs
, rt
, rd
, sa
, function
, imm
, instr_index
, msb
, lsb
, size
;
18849 /* Additional variables for instruction fields in DSP ASE insructions */
18851 imm
= get_imm(cins
);
18856 instr_index
= get_instr_index(cins
);
18857 function
= get_function(cins
);
18858 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
18860 switch (function
) {
18861 case 0x01: { /* Doubleword Extract Bit Field - DEXTM; MIPS64r2 */
18862 msb
= get_msb(cins
);
18863 lsb
= get_lsb(cins
);
18866 UInt dstSz
= msb
+ 33;
18867 t1
= newTemp(Ity_I64
);
18868 DIP("dextm r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
+ 1);
18870 UChar lsAmt
= 64 - (srcPos
+ dstSz
); /* left shift amount; */
18871 UChar rsAmt
= 64 - dstSz
; /* right shift amount; */
18873 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
18874 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
18879 case 0x02: { /* Doubleword Extract Bit Field Upper - DEXTU; MIPS64r2 */
18880 msb
= get_msb(cins
);
18881 lsb
= get_lsb(cins
);
18883 UInt srcPos
= lsb
+ 32;
18884 UInt dstSz
= msb
+ 1;
18885 DIP("dextu r%u, r%u, %u, %u", rt
, rs
, srcPos
, dstSz
);
18886 t1
= newTemp(Ity_I64
);
18888 vassert(srcPos
>= 32 && srcPos
< 64);
18889 vassert(dstSz
> 0 && dstSz
<= 32);
18890 vassert((srcPos
+ dstSz
) > 32 && (srcPos
+ dstSz
) <= 64);
18892 UChar lsAmt
= 64 - (srcPos
+ dstSz
); /* left shift amount; */
18893 UChar rsAmt
= 64 - dstSz
; /* right shift amount; */
18895 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
18896 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
18900 case 0x05: { /* Doubleword Insert Bit Field Middle - DINSM; MIPS64r2 */
18901 msb
= get_msb(cins
);
18902 lsb
= get_lsb(cins
);
18905 UInt srcSz
= msb
- lsb
+ 33;
18910 IRTemp tmpT1
= newTemp(ty
);
18911 IRTemp tmpT2
= newTemp(ty
);
18912 IRTemp tmpT3
= newTemp(ty
);
18913 IRTemp tmpT4
= newTemp(ty
);
18914 IRTemp tmpT5
= newTemp(ty
);
18915 IRTemp tmpT6
= newTemp(ty
);
18916 IRTemp tmpT7
= newTemp(ty
);
18917 IRTemp tmpRs
= newTemp(ty
);
18918 IRTemp tmpRt
= newTemp(ty
);
18919 IRTemp tmpRd
= newTemp(ty
);
18921 assign(tmpRs
, getIReg(rs
));
18922 assign(tmpRt
, getIReg(rt
));
18923 DIP("dinsm r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
);
18925 UChar lsAmt
= dstPos
+ srcSz
- 1; /* left shift amount; */
18926 UChar rsAmt
= dstPos
+ srcSz
- 1; /* right shift amount; */
18928 assign(t1
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
18929 assign(tmpT1
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(1)));
18930 assign(t2
, binop(Iop_Shl64
, mkexpr(tmpT1
), mkU8(lsAmt
)));
18931 assign(tmpT2
, binop(Iop_Shl64
, mkexpr(t2
), mkU8(1)));
18933 lsAmt
= 63 - dstPos
; /* left shift amount; */
18934 rsAmt
= 63 - dstPos
; /* right shift amount; */
18936 assign(t3
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
18937 assign(tmpT3
, binop(Iop_Shl64
, mkexpr(t3
), mkU8(1)));
18938 assign(t4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(rsAmt
)));
18939 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(t4
), mkU8(1)));
18941 /* extract size from src register */
18942 lsAmt
= 64 - srcSz
; /* left shift amount; */
18943 rsAmt
= 64 - (lsb
+ srcSz
); /* right shift amount; */
18945 assign(tmpT5
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
18946 assign(tmpT6
, binop(Iop_Shr64
, mkexpr(tmpT5
), mkU8(rsAmt
)));
18948 assign(tmpT7
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT4
)));
18949 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT6
), mkexpr(tmpT7
)));
18950 putIReg(rt
, mkexpr(tmpRd
));
18954 case 0x06: { /* Doubleword Insert Bit Field Upper - DINSU; MIPS64r2 */
18955 msb
= get_msb(cins
);
18956 lsb
= get_lsb(cins
);
18958 UInt dstPos
= lsb
+ 32;
18959 UInt srcSz
= msb
- lsb
+ 1;
18960 IRTemp tmpT1
= newTemp(ty
);
18961 IRTemp tmpT2
= newTemp(ty
);
18962 IRTemp tmpT3
= newTemp(ty
);
18963 IRTemp tmpT4
= newTemp(ty
);
18964 IRTemp tmpT5
= newTemp(ty
);
18965 IRTemp tmpT6
= newTemp(ty
);
18966 IRTemp tmpT7
= newTemp(ty
);
18967 IRTemp tmpT8
= newTemp(ty
);
18968 IRTemp tmpT9
= newTemp(ty
);
18969 IRTemp tmpRs
= newTemp(ty
);
18970 IRTemp tmpRt
= newTemp(ty
);
18971 IRTemp tmpRd
= newTemp(ty
);
18973 assign(tmpRs
, getIReg(rs
));
18974 assign(tmpRt
, getIReg(rt
));
18975 DIP("dinsu r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
);
18977 UChar lsAmt
= 64 - srcSz
; /* left shift amount; */
18978 UChar rsAmt
= 64 - (dstPos
+ srcSz
); /* right shift amount; */
18979 assign(tmpT1
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
18980 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(rsAmt
)));
18982 lsAmt
= 64 - dstPos
; /* left shift amount; */
18983 rsAmt
= 64 - dstPos
; /* right shift amount; */
18984 assign(tmpT3
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
18985 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(rsAmt
)));
18987 lsAmt
= dstPos
; /* left shift amount; */
18988 rsAmt
= srcSz
; /* right shift amount; */
18989 assign(tmpT5
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
18990 assign(tmpT6
, binop(Iop_Shr64
, mkexpr(tmpT5
), mkU8(lsAmt
)));
18992 assign(tmpT7
, binop(Iop_Shl64
, mkexpr(tmpT6
), mkU8(rsAmt
)));
18993 assign(tmpT8
, binop(Iop_Shl64
, mkexpr(tmpT7
), mkU8(lsAmt
)));
18995 assign(tmpT9
, binop(Iop_Or64
, mkexpr(tmpT8
), mkexpr(tmpT4
)));
18996 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT9
)));
18997 putIReg(rt
, mkexpr(tmpRd
));
19001 case 0x07: { /* Doubleword Insert Bit Field - DINS; MIPS64r2 */
19002 IRTemp tmp1
= newTemp(ty
);
19003 IRTemp tmpT1
= newTemp(ty
);
19004 IRTemp tmpT2
= newTemp(ty
);
19005 IRTemp tmpT3
= newTemp(ty
);
19006 IRTemp tmpT4
= newTemp(ty
);
19007 IRTemp tmpT5
= newTemp(ty
);
19008 IRTemp tmpT6
= newTemp(ty
);
19009 IRTemp tmpT7
= newTemp(ty
);
19010 IRTemp tmpT8
= newTemp(ty
);
19011 IRTemp tmpT9
= newTemp(ty
);
19012 IRTemp tmp
= newTemp(ty
);
19013 IRTemp tmpRs
= newTemp(ty
);
19014 IRTemp tmpRt
= newTemp(ty
);
19015 IRTemp tmpRd
= newTemp(ty
);
19017 assign(tmpRs
, getIReg(rs
));
19018 assign(tmpRt
, getIReg(rt
));
19020 msb
= get_msb(cins
);
19021 lsb
= get_lsb(cins
);
19023 DIP("dins r%u, r%u, %u, %u", rt
, rs
, lsb
,
19025 UChar lsAmt
= 63 - lsb
; /* left shift amount; */
19026 UChar rsAmt
= 63 - lsb
; /* right shift amount; */
19027 assign(tmp
, binop(Iop_Shl64
, mkexpr(tmpRt
), mkU8(lsAmt
)));
19028 assign(tmpT1
, binop(Iop_Shl64
, mkexpr(tmp
), mkU8(1)));
19029 assign(tmp1
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(rsAmt
)));
19030 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmp1
), mkU8(1)));
19032 lsAmt
= msb
; /* left shift amount; */
19033 rsAmt
= 1; /*right shift amount; */
19034 assign(tmpT3
, binop(Iop_Shr64
, mkexpr(tmpRt
), mkU8(rsAmt
)));
19035 assign(tmpT4
, binop(Iop_Shr64
, mkexpr(tmpT3
), mkU8(lsAmt
)));
19036 assign(tmpT5
, binop(Iop_Shl64
, mkexpr(tmpT4
), mkU8(rsAmt
)));
19037 assign(tmpT6
, binop(Iop_Shl64
, mkexpr(tmpT5
), mkU8(lsAmt
)));
19039 lsAmt
= 64 - (msb
- lsb
+ 1); /* left shift amount; */
19040 rsAmt
= 64 - (msb
+ 1); /* right shift amount; */
19041 assign(tmpT7
, binop(Iop_Shl64
, mkexpr(tmpRs
), mkU8(lsAmt
)));
19042 assign(tmpT8
, binop(Iop_Shr64
, mkexpr(tmpT7
), mkU8(rsAmt
)));
19044 assign(tmpT9
, binop(Iop_Or64
, mkexpr(tmpT2
), mkexpr(tmpT8
)));
19045 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT6
), mkexpr(tmpT9
)));
19046 putIReg(rt
, mkexpr(tmpRd
));
19050 case 0x24: /* DBSHFL */
19051 lsb
= get_lsb(cins
);
19052 IRTemp tmpRs
= newTemp(ty
);
19053 IRTemp tmpRt
= newTemp(ty
);
19054 IRTemp tmpRd
= newTemp(ty
);
19055 assign(tmpRs
, getIReg(rs
));
19056 assign(tmpRt
, getIReg(rt
));
19059 case 0x02: { /* DSBH */
19060 DIP("dsbh r%u, r%u", rd
, rt
);
19061 IRTemp tmpT1
= newTemp(ty
);
19062 IRTemp tmpT2
= newTemp(ty
);
19063 IRTemp tmpT3
= newTemp(ty
);
19064 IRTemp tmpT4
= newTemp(ty
);
19065 IRTemp tmpT5
= newTemp(Ity_I64
);
19066 IRTemp tmpT6
= newTemp(ty
);
19067 assign(tmpT5
, mkU64(0xFF00FF00FF00FF00ULL
));
19068 assign(tmpT6
, mkU64(0x00FF00FF00FF00FFULL
));
19069 assign(tmpT1
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT5
)));
19070 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(8)));
19071 assign(tmpT3
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT6
)));
19072 assign(tmpT4
, binop(Iop_Shl64
, mkexpr(tmpT3
), mkU8(8)));
19073 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT4
), mkexpr(tmpT2
)));
19074 putIReg(rd
, mkexpr(tmpRd
));
19078 case 0x05: { /* DSHD */
19079 DIP("dshd r%u, r%u\n", rd
, rt
);
19080 IRTemp tmpT1
= newTemp(ty
);
19081 IRTemp tmpT2
= newTemp(ty
);
19082 IRTemp tmpT3
= newTemp(ty
);
19083 IRTemp tmpT4
= newTemp(ty
);
19084 IRTemp tmpT5
= newTemp(Ity_I64
);
19085 IRTemp tmpT6
= newTemp(ty
);
19086 IRTemp tmpT7
= newTemp(ty
);
19087 IRTemp tmpT8
= newTemp(ty
);
19088 IRTemp tmpT9
= newTemp(ty
);
19089 assign(tmpT5
, mkU64(0xFFFF0000FFFF0000ULL
));
19090 assign(tmpT6
, mkU64(0x0000FFFF0000FFFFULL
));
19091 assign(tmpT1
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT5
)));
19092 assign(tmpT2
, binop(Iop_Shr64
, mkexpr(tmpT1
), mkU8(16)));
19093 assign(tmpT3
, binop(Iop_And64
, mkexpr(tmpRt
), mkexpr(tmpT6
)));
19094 assign(tmpT4
, binop(Iop_Shl64
, mkexpr(tmpT3
), mkU8(16)));
19095 assign(tmpT7
, binop(Iop_Or64
, mkexpr(tmpT4
), mkexpr(tmpT2
)));
19096 assign(tmpT8
, binop(Iop_Shl64
, mkexpr(tmpT7
), mkU8(32)));
19097 assign(tmpT9
, binop(Iop_Shr64
, mkexpr(tmpT7
), mkU8(32)));
19098 assign(tmpRd
, binop(Iop_Or64
, mkexpr(tmpT8
), mkexpr(tmpT9
)));
19099 putIReg(rd
, mkexpr(tmpRd
));
19103 case 0x08 ... 0x0f: { /* DALIGN */
19104 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19105 DIP("dalign r%u, r%u, r%u, %u", rd
, rs
, rt
, lsb
& 0x7);
19106 UInt bp
= (lsb
& 0x7) << 3;
19109 putIReg(rd
, binop(Iop_Or64
,
19110 binop(Iop_Shl64
, getIReg(rt
), mkU8(bp
)),
19112 getIReg(rs
), mkU8(64 - bp
))));
19114 putIReg(rd
, getIReg(rt
));
19122 case 0: /* DBITSWAP */
19123 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19124 DIP("dbitswap r%u, r%u", rd
, rt
);
19125 putIReg(rd
, qop(Iop_Rotx64
, getIReg(rt
), mkU8(7), mkU8(8), mkU8(1)));
19138 case 0x3B: /* RDHWR */
19139 DIP("rdhwr r%u, r%u", rt
, rd
);
19141 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
19142 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
) ||
19143 (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_BROADCOM
)) {
19145 putIReg(rt
, getULR());
19148 && VEX_MIPS_COMP_ID(archinfo
->hwcaps
)
19149 == VEX_PRID_COMP_CAVIUM
)) {
19150 IRExpr
** arg
= mkIRExprVec_1(mkU32(rd
));
19151 IRTemp val
= newTemp(ty
);
19152 IRDirty
*d
= unsafeIRDirty_1_N(val
,
19154 "mips_dirtyhelper_rdhwr",
19155 &mips_dirtyhelper_rdhwr
,
19157 stmt(IRStmt_Dirty(d
));
19158 putIReg(rt
, mkexpr(val
));
19167 case 0x04: /* INS */
19168 msb
= get_msb(cins
);
19169 lsb
= get_lsb(cins
);
19170 size
= msb
- lsb
+ 1;
19171 DIP("ins size:%u msb:%u lsb:%u", size
, msb
, lsb
);
19173 vassert(lsb
+ size
<= 32);
19174 vassert(lsb
+ size
> 0);
19176 /* put size bits from rs at the pos in temporary */
19177 t0
= newTemp(Ity_I32
);
19178 t3
= newTemp(Ity_I32
);
19179 /* shift left for 32 - size to clear leading bits and get zeros
19181 assign(t0
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rs
)),
19183 /* now set it at pos */
19184 t1
= newTemp(Ity_I32
);
19185 assign(t1
, binop(Iop_Shr32
, mkexpr(t0
), mkU8(32 - size
- lsb
)));
19188 t2
= newTemp(Ity_I32
);
19189 /* clear everything but lower pos bits from rt */
19190 assign(t2
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rt
)),
19192 assign(t3
, binop(Iop_Shr32
, mkexpr(t2
), mkU8(32 - lsb
)));
19194 assign(t3
, mkU32(0));
19197 t4
= newTemp(Ity_I32
);
19198 /* clear everything but upper msb + 1 bits from rt */
19199 assign(t4
, binop(Iop_Shr32
, mkNarrowTo32(ty
, getIReg(rt
)),
19201 t5
= newTemp(Ity_I32
);
19202 assign(t5
, binop(Iop_Shl32
, mkexpr(t4
), mkU8(msb
+ 1)));
19204 /* now combine these registers */
19206 t6
= newTemp(Ity_I32
);
19207 assign(t6
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t1
)));
19208 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t6
),
19209 mkexpr(t3
)), True
));
19211 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t1
),
19212 mkexpr(t5
)), True
));
19215 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t1
),
19216 mkexpr(t3
)), True
));
19221 case 0x00: /* EXT */
19222 msb
= get_msb(cins
);
19223 lsb
= get_lsb(cins
);
19225 DIP("ext size:%u msb:%u lsb:%u", size
, msb
, lsb
);
19226 vassert(lsb
+ size
<= 32);
19227 vassert(lsb
+ size
> 0);
19229 /* put size bits from rs at the top of in temporary */
19230 if (lsb
+ size
< 32) {
19231 t0
= newTemp(Ity_I32
);
19232 assign(t0
, binop(Iop_Shl32
, mkNarrowTo32(ty
, getIReg(rs
)),
19233 mkU8(32 - lsb
- size
)));
19235 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Shr32
, mkexpr(t0
),
19236 mkU8(32 - size
)), True
));
19238 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Shr32
,
19239 mkNarrowTo32(ty
, getIReg(rs
)),
19240 mkU8(32 - size
)), True
));
19245 case 0x03: /* Doubleword Extract Bit Field - DEXT; MIPS64r2 */
19246 msb
= get_msb(cins
);
19247 lsb
= get_lsb(cins
);
19249 DIP("dext r%u, r%u, %u, %u", rt
, rs
, lsb
, msb
+ 1);
19250 t1
= newTemp(Ity_I64
);
19252 vassert(size
> 0 && size
<= 32);
19253 vassert((lsb
+ size
) > 0 && (lsb
+ size
) <= 63);
19255 UChar lsAmt
= 63 - (lsb
+ msb
); /* left shift amount; */
19256 UChar rsAmt
= 63 - msb
; /* right shift amount; */
19258 assign(t1
, binop(Iop_Shl64
, getIReg(rs
), mkU8(lsAmt
)));
19259 putIReg(rt
, binop(Iop_Shr64
, mkexpr(t1
), mkU8(rsAmt
)));
19263 case 0x20: /* BSHFL */
19265 case 0x0: /* BITSWAP */
19266 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19267 DIP("bitswap r%u, r%u", rd
, rt
);
19270 putIReg(rd
, unop(Iop_32Uto64
, qop(Iop_Rotx32
, unop(Iop_64to32
, getIReg(rt
)),
19271 mkU8(7), mkU8(8), mkU8(1))));
19273 putIReg(rd
, qop(Iop_Rotx32
, getIReg(rt
), mkU8(7),
19274 mkU8(8), mkU8(1)));
19277 ILLEGAL_INSTRUCTON
;
19282 case 0x02: /* WSBH */
19283 DIP("wsbh r%u, r%u", rd
, rt
);
19284 t0
= newTemp(Ity_I32
);
19285 t1
= newTemp(Ity_I32
);
19286 t2
= newTemp(Ity_I32
);
19287 t3
= newTemp(Ity_I32
);
19288 assign(t0
, binop(Iop_Shl32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19289 getIReg(rt
)), mkU32(0x00FF0000)),
19291 assign(t1
, binop(Iop_Shr32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19292 getIReg(rt
)), mkU32(0xFF000000)), mkU8(0x8)));
19293 assign(t2
, binop(Iop_Shl32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19294 getIReg(rt
)), mkU32(0x000000FF)), mkU8(0x8)));
19295 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkNarrowTo32(ty
,
19296 getIReg(rt
)), mkU32(0x0000FF00)), mkU8(0x8)));
19297 putIReg(rd
, mkWidenFrom32(ty
, binop(Iop_Or32
, binop(Iop_Or32
,
19298 mkexpr(t0
), mkexpr(t1
)),
19299 binop(Iop_Or32
, mkexpr(t2
),
19300 mkexpr(t3
))), True
));
19303 case 0x10: /* SEB */
19304 DIP("seb r%u, r%u", rd
, rt
);
19307 putIReg(rd
, unop(Iop_8Sto64
, unop(Iop_64to8
, getIReg(rt
))));
19309 putIReg(rd
, unop(Iop_8Sto32
, unop(Iop_32to8
, getIReg(rt
))));
19313 case 0x18: /* SEH */
19314 DIP("seh r%u, r%u", rd
, rt
);
19317 putIReg(rd
, unop(Iop_16Sto64
, unop(Iop_64to16
, getIReg(rt
))));
19319 putIReg(rd
, unop(Iop_16Sto32
, unop(Iop_32to16
, getIReg(rt
))));
19323 case 0x08 ... 0x0b: /* ALIGN */
19324 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19326 UInt bp
= (sa
& 0x3) << 3;
19329 putIReg(rd
, unop(Iop_32Sto64
,
19340 putIReg(rd
, getIReg(rt
));
19342 UInt bp
= (sa
& 0x3) << 3;
19345 putIReg(rd
, binop(Iop_Or32
,
19347 getIReg(rt
), mkU8(bp
)),
19349 getIReg(rs
), mkU8(32 - bp
))));
19351 putIReg(rd
, getIReg(rt
));
19354 ILLEGAL_INSTRUCTON
;
19366 /* --- MIPS32(r2) DSP ASE(r2) / Cavium Specfic (LX) instructions --- */
19368 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
19369 if (dis_instr_CVM(cins
))
19375 case 0xC: /* INSV */
19376 case 0x38: { /* EXTR.W */
19377 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19378 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19380 if (0 != retVal
) {
19392 case 0x10: { /* ADDU.QB */
19394 case 0xC: /* SUBU_S.PH */
19395 case 0xD: /* ADDU_S.PH */
19396 case 0x1E: { /* MULQ_S.PH */
19397 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19398 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19400 if (0 != retVal
) {
19413 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19414 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19416 if (0 != retVal
) {
19432 case 0x11: { /* CMPU.EQ.QB */
19434 case 0x18: /* CMPGDU.EQ.QB */
19435 case 0x19: /* CMPGDU.LT.QB */
19436 case 0x1A: /* CMPGDU.LE.QB */
19437 case 0x0D: /* PRECR.QB.PH */
19438 case 0x1E: /* PRECR_SRA.PH.W */
19439 case 0x1F: { /* PRECR_SRA_R.PH.W */
19440 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19441 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19443 if (0 != retVal
) {
19456 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19457 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19459 if (0 != retVal
) {
19475 case 0x12: { /* ABSQ_S.PH */
19477 case 0x1: { /* ABSQ_S.QB */
19478 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19479 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19481 if (0 != retVal
) {
19494 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19495 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19497 if (0 != retVal
) {
19513 case 0x13: { /* SHLL.QB */
19515 case 0x04: /* SHRA.QB */
19516 case 0x05: /* SHRA_R.QB */
19517 case 0x06: /* SHRAV.QB */
19518 case 0x07: /* SHRAV_R.QB */
19519 case 0x19: /* SHLR.PH */
19520 case 0x1B: { /* SHLRV.PH */
19521 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19522 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19524 if (0 != retVal
) {
19537 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19538 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19540 if (0 != retVal
) {
19556 case 0x30: { /* DPAQ.W.PH */
19558 case 0x0: /* DPA.W.PH */
19559 case 0x18: /* DPAQX_S.W.PH */
19560 case 0x1A: /* DPAQX_SA.W.PH */
19561 case 0x8: /* DPAX.W.PH */
19562 case 0x1: /* DPS.W.PH */
19563 case 0x19: /* DPSQX_S.W.PH */
19564 case 0x1B: /* DPSQX_SA.W.PH */
19565 case 0x9: /* DPSX.W.PH */
19566 case 0x2: { /* MULSA.W.PH */
19567 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19568 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19570 if (0 != retVal
) {
19583 if (VEX_MIPS_PROC_DSP(archinfo
->hwcaps
)) {
19584 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19586 if (0 != retVal
) {
19602 case 0x18: /* ADDUH.QB/MUL.PH */
19603 case 0x31: { /* APPEND */
19604 if (VEX_MIPS_PROC_DSP2(archinfo
->hwcaps
)) {
19605 UInt retVal
= disDSPInstr_MIPS_WRK ( cins
);
19607 if (0 != retVal
) {
19617 case 0x35: { /* PREF r6*/
19622 case 0x36: { /* LL */
19623 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19624 DIP("ll r%u, %u(r%u)", rt
, imm
, rs
);
19625 LOAD_STORE_PATTERN
;
19627 assign(t2
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
19628 putLLaddr(mkexpr(t1
));
19629 putLLdata(mkexpr(t2
));
19630 putIReg(rt
, mkexpr(t2
));
19634 case 0x26: { /* SC */
19635 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19636 DIP("sc r%u, %u(r%u)", rt
, imm
, rs
);
19637 LOAD_STORE_PATTERN
;
19639 t2
= newTemp(Ity_I1
);
19640 t3
= newTemp(Ity_I32
);
19641 assign(t2
, binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19642 mkexpr(t1
), getLLaddr()));
19643 assign(t3
, mkNarrowTo32(ty
, getIReg(rt
)));
19644 putLLaddr(LLADDR_INVALID
);
19645 putIReg(rt
, getIReg(0));
19647 mips_next_insn_if(mkexpr(t2
));
19649 t4
= newTemp(Ity_I32
);
19650 t5
= newTemp(Ity_I32
);
19652 assign(t5
, mkNarrowTo32(ty
, getLLdata()));
19654 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
19655 MIPS_IEND
, mkexpr(t1
), /* addr */
19656 NULL
, mkexpr(t5
), /* expected value */
19657 NULL
, mkexpr(t3
) /* new value */)));
19659 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
,
19660 binop(Iop_CmpEQ32
, mkexpr(t4
), mkexpr(t5
))));
19664 case 0x37: { /* LLD */
19665 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19666 DIP("lld r%u, %u(r%u)", rt
, imm
, rs
);
19667 LOAD_STORE_PATTERN
;
19669 t2
= newTemp(Ity_I64
);
19670 assign(t2
, load(Ity_I64
, mkexpr(t1
)));
19671 putLLaddr(mkexpr(t1
));
19672 putLLdata(mkexpr(t2
));
19673 putIReg(rt
, mkexpr(t2
));
19677 case 0x27: { /* SCD */
19678 imm
= extend_s_9to16((instr_index
>> 7) & 0x1ff);
19679 DIP("sdc r%u, %u(r%u)", rt
, imm
, rs
);
19680 LOAD_STORE_PATTERN
;
19682 t2
= newTemp(Ity_I1
);
19683 t3
= newTemp(Ity_I64
);
19684 assign(t2
, binop(Iop_CmpNE64
, mkexpr(t1
), getLLaddr()));
19685 assign(t3
, getIReg(rt
));
19686 putLLaddr(LLADDR_INVALID
);
19687 putIReg(rt
, getIReg(0));
19689 mips_next_insn_if(mkexpr(t2
));
19691 t4
= newTemp(Ity_I64
);
19692 t5
= newTemp(Ity_I64
);
19694 assign(t5
, getLLdata());
19696 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
19697 MIPS_IEND
, mkexpr(t1
), /* addr */
19698 NULL
, mkexpr(t5
), /* expected value */
19699 NULL
, mkexpr(t3
) /* new value */)));
19701 putIReg(rt
, unop(Iop_1Uto64
,
19702 binop(Iop_CmpEQ64
, mkexpr(t4
), mkexpr(t5
))));
19713 static UInt
disInstr_MIPS_WRK_00(UInt cins
, const VexArchInfo
* archinfo
,
19714 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
19715 IRStmt
** bstmt
, IRExpr
** lastn
)
19718 UInt opcode
, rs
, rt
, trap_code
, imm
, instr_index
, p
;
19719 /* Additional variables for instruction fields in DSP ASE insructions */
19721 opcode
= get_opcode(cins
);
19722 imm
= get_imm(cins
);
19725 instr_index
= get_instr_index(cins
);
19726 trap_code
= get_code(cins
);
19727 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
19729 switch (opcode
& 0x0F) {
19730 case 0x00: /* Special */
19731 return disInstr_MIPS_WRK_Special(cins
, archinfo
, abiinfo
,
19732 dres
, bstmt
, lastn
);
19734 case 0x01: /* Regimm */
19736 case 0x00: /* BLTZ */
19737 DIP("bltz r%u, %u", rs
, imm
);
19740 if (!dis_instr_branch(cins
, dres
, bstmt
))
19743 dis_branch(False
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19744 mkU32(0x80000000)), mkU32(0x80000000)), imm
, bstmt
);
19748 case 0x01: /* BGEZ */
19749 DIP("bgez r%u, %u", rs
, imm
);
19752 if (!dis_instr_branch(cins
, dres
, bstmt
))
19755 dis_branch(False
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19756 mkU32(0x80000000)), mkU32(0x0)), imm
, bstmt
);
19760 case 0x02: /* BLTZL */
19761 DIP("bltzl r%u, %u", rs
, imm
);
19762 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19763 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19764 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19765 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19769 case 0x03: /* BGEZL */
19770 DIP("bgezl r%u, %u", rs
, imm
);
19771 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19772 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19773 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19774 mode64
? mkU64(0x0) : mkU32(0x0)), imm
);
19777 case 0x06: { /* DAHI */
19778 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19779 DIP("dahi r%u, %x", rs
, imm
);
19780 putIReg(rs
, binop(Iop_Add64
,
19781 getIReg(rs
), mkU64(extend_s_16to64 (imm
) << 32)));
19789 case 0x08: /* TGEI */
19790 DIP("tgei r%u, %u %u", rs
, imm
, trap_code
);
19793 stmt (IRStmt_Exit (unop (Iop_Not1
,
19794 binop (Iop_CmpLT64S
,
19796 mkU64 (extend_s_16to64 (imm
)))),
19798 IRConst_U64(guest_PC_curr_instr
+ 4),
19801 stmt (IRStmt_Exit (unop (Iop_Not1
,
19802 binop (Iop_CmpLT32S
,
19804 mkU32 (extend_s_16to32 (imm
)))),
19806 IRConst_U32(guest_PC_curr_instr
+ 4),
19812 case 0x09: { /* TGEIU */
19813 DIP("tgeiu r%u, %u %u", rs
, imm
, trap_code
);
19816 stmt (IRStmt_Exit (unop (Iop_Not1
,
19817 binop (Iop_CmpLT64U
,
19819 mkU64 (extend_s_16to64 (imm
)))),
19821 IRConst_U64(guest_PC_curr_instr
+ 4),
19824 stmt (IRStmt_Exit (unop (Iop_Not1
,
19825 binop (Iop_CmpLT32U
,
19827 mkU32 (extend_s_16to32 (imm
)))),
19829 IRConst_U32(guest_PC_curr_instr
+ 4),
19836 case 0x0A: { /* TLTI */
19837 DIP("tlti r%u, %u %u", rs
, imm
, trap_code
);
19840 stmt (IRStmt_Exit (binop (Iop_CmpLT64S
, getIReg (rs
),
19841 mkU64 (extend_s_16to64 (imm
))),
19843 IRConst_U64(guest_PC_curr_instr
+ 4),
19846 stmt (IRStmt_Exit (binop (Iop_CmpLT32S
, getIReg (rs
),
19847 mkU32 (extend_s_16to32 (imm
))),
19849 IRConst_U32(guest_PC_curr_instr
+ 4),
19856 case 0x0B: { /* TLTIU */
19857 DIP("tltiu r%u, %u %u", rs
, imm
, trap_code
);
19860 stmt (IRStmt_Exit (binop (Iop_CmpLT64U
, getIReg (rs
),
19861 mkU64 (extend_s_16to64 (imm
))),
19863 IRConst_U64(guest_PC_curr_instr
+ 4),
19866 stmt (IRStmt_Exit (binop (Iop_CmpLT32U
, getIReg (rs
),
19867 mkU32 (extend_s_16to32 (imm
))),
19869 IRConst_U32(guest_PC_curr_instr
+ 4),
19876 case 0x0C: { /* TEQI */
19877 DIP("teqi r%u, %u %u", rs
, imm
, trap_code
);
19880 stmt (IRStmt_Exit (binop (Iop_CmpEQ64
, getIReg (rs
),
19881 mkU64 (extend_s_16to64 (imm
))),
19883 IRConst_U64(guest_PC_curr_instr
+ 4),
19886 stmt (IRStmt_Exit (binop (Iop_CmpEQ32
, getIReg (rs
),
19887 mkU32 (extend_s_16to32 (imm
))),
19889 IRConst_U32(guest_PC_curr_instr
+ 4),
19896 case 0x0E: { /* TNEI */
19897 DIP("tnei r%u, %u %u", rs
, imm
, trap_code
);
19900 stmt (IRStmt_Exit (binop (Iop_CmpNE64
, getIReg (rs
),
19901 mkU64 (extend_s_16to64 (imm
))),
19903 IRConst_U64(guest_PC_curr_instr
+ 4),
19906 stmt (IRStmt_Exit (binop (Iop_CmpNE32
, getIReg (rs
),
19907 mkU32 (extend_s_16to32 (imm
))),
19909 IRConst_U32(guest_PC_curr_instr
+ 4),
19916 case 0x10: /* BLTZAL */
19917 DIP("bltzal r%u, %u", rs
, imm
);
19920 if (!dis_instr_branch(cins
, dres
, bstmt
))
19923 dis_branch(True
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19924 mkU32(0x80000000)), mkU32(0x80000000)), imm
, bstmt
);
19928 case 0x11: /* BGEZAL */
19929 DIP("bgezal r%u, %u", rs
, imm
);
19932 if (!dis_instr_branch(cins
, dres
, bstmt
))
19935 dis_branch(True
, binop(Iop_CmpEQ32
, binop(Iop_And32
, getIReg(rs
),
19936 mkU32(0x80000000)), mkU32(0x0)), imm
, bstmt
);
19940 case 0x12: /* BLTZALL */
19941 DIP("bltzall r%u, %u", rs
, imm
);
19942 putIReg(31, mode64
? mkU64(guest_PC_curr_instr
+ 8) :
19943 mkU32(guest_PC_curr_instr
+ 8));
19944 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
19945 binop(mode64
? Iop_And64
: Iop_And32
, getIReg(rs
),
19946 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19947 mode64
? mkU64(0x8000000000000000ULL
) : mkU32(0x80000000)),
19951 case 0x13: /* BGEZALL */
19952 DIP("bgezall r%u, %u", rs
, imm
);
19955 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
19956 *lastn
= dis_branch_likely(binop(Iop_CmpNE64
,
19959 mkU64(0x8000000000000000ULL
)),
19963 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
19964 *lastn
= dis_branch_likely(binop(Iop_CmpNE32
, binop(Iop_And32
,
19965 getIReg(rs
), mkU32(0x80000000)),
19971 case 0x1C: { /* BPOSGE32 */
19972 DIP("bposge32 %u", imm
);
19974 t0
= newTemp(Ity_I32
);
19975 /* Get pos field from DSPControl register. */
19976 assign(t0
, binop(Iop_And32
, getDSPControl(), mkU32(0x3f)));
19977 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLT32U
, mkexpr(t0
),
19978 mkU32(32))), imm
, bstmt
);
19982 case 0x1E: { /* DATI */
19983 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
19984 DIP("dati r%u, %x", rs
, imm
);
19985 putIReg(rs
, binop(Iop_Add64
,
19986 getIReg(rs
), mkU64((long long)imm
<< 48)));
19994 case 0x1F: /* SYNCI */
19995 /* Just ignore it */
20005 DIP("j 0x%x", instr_index
);
20009 assign(t0
, mkU64((guest_PC_curr_instr
& 0xFFFFFFFFF0000000ULL
) |
20010 (instr_index
<< 2)));
20012 assign(t0
, mkU32((guest_PC_curr_instr
& 0xF0000000) |
20013 (instr_index
<< 2)));
20015 *lastn
= mkexpr(t0
);
20018 case 0x03: /* JAL */
20019 DIP("jal 0x%x", instr_index
);
20022 putIReg(31, mkU64(guest_PC_curr_instr
+ 8));
20024 assign(t0
, mkU64((guest_PC_curr_instr
& 0xFFFFFFFFF0000000ULL
) |
20025 (instr_index
<< 2)));
20027 putIReg(31, mkU32(guest_PC_curr_instr
+ 8));
20029 assign(t0
, mkU32((guest_PC_curr_instr
& 0xF0000000) |
20030 (instr_index
<< 2)));
20033 *lastn
= mkexpr(t0
);
20036 case 0x04: /* BEQ, B */
20037 if (rs
== 0 && rt
== 0) {
20038 ULong branch_offset
;
20043 branch_offset
= extend_s_18to64(imm
<< 2);
20044 assign(t0
, mkU64(guest_PC_curr_instr
+ 4 + branch_offset
));
20046 branch_offset
= extend_s_18to32(imm
<< 2);
20047 assign(t0
, mkU32(guest_PC_curr_instr
+ 4 + branch_offset
));
20050 *lastn
= mkexpr(t0
);
20052 DIP("beq r%u, r%u, %u", rs
, rt
, imm
);
20055 dis_branch(False
, binop(Iop_CmpEQ64
, getIReg(rs
), getIReg(rt
)),
20058 dis_branch(False
, binop(Iop_CmpEQ32
, getIReg(rs
), getIReg(rt
)),
20063 case 0x05: /* BNE */
20064 DIP("bne r%u, r%u, %u", rs
, rt
, imm
);
20067 dis_branch(False
, binop(Iop_CmpNE64
, getIReg(rs
), getIReg(rt
)),
20070 dis_branch(False
, binop(Iop_CmpNE32
, getIReg(rs
), getIReg(rt
)),
20075 case 0x06: /* BLEZ, BLEZALC, BGEZALC, BGEUC */
20076 if (rt
== 0) { /* BLEZ */
20077 DIP("blez r%u, %u", rs
, imm
);
20080 dis_branch(False
, binop(Iop_CmpLE64S
, getIReg(rs
), mkU64(0x0)),
20083 dis_branch(False
, binop(Iop_CmpLE32S
, getIReg(rs
), mkU32(0x0)), imm
,
20085 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20086 if (rs
== 0) { /* BLEZALC */
20087 DIP("blezalc r%u, %u", rt
, imm
);
20090 dis_branch_compact(True
,
20091 binop(Iop_CmpLE64S
, getIReg(rt
), mkU64(0x0)),
20094 dis_branch_compact(True
,
20095 binop(Iop_CmpLE32S
, getIReg(rt
), mkU32(0x0)),
20097 } else if (rt
== rs
) { /* BGEZALC */
20098 DIP("bgezalc r%u, %u", rt
, imm
);
20101 dis_branch_compact(True
,
20102 binop(Iop_CmpLE64S
, mkU64(0x0), getIReg(rt
)),
20105 dis_branch_compact(True
,
20106 binop(Iop_CmpLE32S
, mkU32(0x0), getIReg(rt
)),
20108 } else { /* BGEUC */
20109 DIP("bgeuc r%u, r%u, %u", rt
, rs
, imm
);
20112 dis_branch_compact(False
,
20114 binop(Iop_CmpLT64U
,
20115 getIReg(rs
), getIReg(rt
))),
20118 dis_branch_compact(False
,
20120 binop(Iop_CmpLT32U
,
20121 getIReg(rs
), getIReg(rt
))),
20130 case 0x07: /* BGTZ, BGTZALC, BLTZALC, BLTUC */
20131 if (rt
== 0) { /* BGTZ */
20132 DIP("bgtz r%u, %u", rs
, imm
);
20135 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLE64S
, getIReg(rs
),
20136 mkU64(0x00))), imm
, bstmt
);
20138 dis_branch(False
, unop(Iop_Not1
, binop(Iop_CmpLE32S
, getIReg(rs
),
20139 mkU32(0x00))), imm
, bstmt
);
20140 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20141 if (rs
== 0) { /* BGTZALC */
20142 DIP("bgtzalc r%u, %u", rt
, imm
);
20145 dis_branch_compact(True
,
20147 binop(Iop_CmpLE64S
,
20148 getIReg(rt
), mkU64(0x0))),
20151 dis_branch_compact(True
,
20153 binop(Iop_CmpLE32S
,
20154 getIReg(rt
), mkU32(0x0))),
20157 } else if (rs
== rt
) { /* BLTZALC */
20158 DIP("bltzalc r%u, %u", rt
, imm
);
20161 dis_branch_compact(True
,
20163 binop(Iop_CmpLE64S
,
20164 mkU64(0x0), getIReg(rt
))),
20167 dis_branch_compact(True
,
20169 binop(Iop_CmpLE32S
,
20170 mkU32(0x0), getIReg(rt
))),
20173 } else { /* BLTUC */
20174 DIP("bltuc r%u, r%u, %u", rt
, rs
, imm
);
20177 dis_branch_compact(False
,
20178 binop(Iop_CmpLT64U
, getIReg(rs
), getIReg(rt
)),
20181 dis_branch_compact(False
,
20182 binop(Iop_CmpLT32U
, getIReg(rs
), getIReg(rt
)),
20192 #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
20194 case 0x08: { /* ADDI */
20195 DIP("addi r%u, r%u, %u", rt
, rs
, imm
);
20196 IRTemp tmpRs32
, t1
, t2
, t3
, t4
;
20197 tmpRs32
= newTemp(Ity_I32
);
20198 assign(tmpRs32
, mkNarrowTo32(ty
, getIReg(rs
)));
20200 t0
= newTemp(Ity_I32
);
20201 t1
= newTemp(Ity_I32
);
20202 t2
= newTemp(Ity_I32
);
20203 t3
= newTemp(Ity_I32
);
20204 t4
= newTemp(Ity_I32
);
20205 /* dst = src0 + sign(imm)
20206 if(sign(src0 ) != sign(imm ))
20208 if(sign(dst) == sign(src0 ))
20210 we have overflow! */
20212 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
),
20213 mkU32(extend_s_16to32(imm
))));
20214 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
),
20215 mkU32(extend_s_16to32(imm
))));
20216 assign(t2
, unop(Iop_1Sto32
, binop(Iop_CmpEQ32
, binop(Iop_And32
,
20217 mkexpr(t1
), mkU32(0x80000000)), mkU32(0x80000000))));
20219 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
20220 assign(t4
, unop(Iop_1Sto32
, binop(Iop_CmpNE32
, binop(Iop_And32
,
20221 mkexpr(t3
), mkU32(0x80000000)), mkU32(0x80000000))));
20223 stmt(IRStmt_Exit(binop(Iop_CmpEQ32
, binop(Iop_Or32
, mkexpr(t2
),
20224 mkexpr(t4
)), mkU32(0)), Ijk_SigFPE_IntOvf
,
20225 mode64
? IRConst_U64(guest_PC_curr_instr
+ 4) :
20226 IRConst_U32(guest_PC_curr_instr
+ 4),
20229 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t0
), True
));
20233 #elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
20235 case 0x08: { /* BEQZALC, BEQC, BOVC */
20236 IRTemp t1
, t2
, t3
, t4
;
20237 if (rs
== 0) { /* BEQZALC */
20238 DIP("beqzalc r%u, %u", rt
, imm
);
20241 dis_branch_compact(True
,
20242 binop(Iop_CmpEQ64
, getIReg(rt
), mkU64(0x0)),
20245 dis_branch_compact(True
,
20246 binop(Iop_CmpEQ32
, getIReg(rt
), mkU32(0x0)),
20249 } else if (rs
< rt
) { /* BEQC */
20250 DIP("beqc r%u, r%u, %u", rs
, rt
, imm
);
20253 dis_branch_compact(False
,
20254 binop(Iop_CmpEQ64
, getIReg(rt
), getIReg(rs
)),
20257 dis_branch_compact(False
,
20258 binop(Iop_CmpEQ32
, getIReg(rt
), getIReg(rs
)),
20261 } else { /* BOVC */
20262 DIP("bovc r%u, r%u, %u", rs
, rt
, imm
);
20265 t0
= newTemp(Ity_I32
);
20266 t1
= newTemp(Ity_I32
);
20267 t2
= newTemp(Ity_I32
);
20268 t3
= newTemp(Ity_I32
);
20269 assign(t0
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20271 mkU64(0xffffffff80000000ULL
)),
20273 IRExpr_ITE(binop(Iop_CmpLT64S
,
20275 mkU64(0x7FFFFFFFULL
)),
20276 mkU32(0), mkU32(1))));
20277 assign(t1
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20279 mkU64(0xffffffff80000000ULL
)),
20281 IRExpr_ITE(binop(Iop_CmpLT64S
,
20283 mkU64(0x7FFFFFFFULL
)),
20284 mkU32(0), mkU32(1))));
20285 assign(t2
, IRExpr_ITE(binop(Iop_CmpLT64S
,
20287 getIReg(rt
), getIReg(rs
)),
20288 mkU64(0xffffffff80000000ULL
)),
20290 IRExpr_ITE(binop(Iop_CmpLT64S
,
20294 mkU64(0x7FFFFFFFULL
)),
20295 mkU32(0), mkU32(1))));
20296 assign(t3
, binop(Iop_Add32
,
20298 binop(Iop_Add32
, mkexpr(t1
), mkexpr(t2
))));
20299 dis_branch_compact(False
,
20300 binop(Iop_CmpNE32
, mkexpr(t3
), mkU32(0)),
20303 IRTemp tmpRs32
= newTemp(Ity_I32
);
20304 IRTemp tmpRt32
= newTemp(Ity_I32
);
20305 assign(tmpRs32
, getIReg(rs
));
20306 assign(tmpRt32
, getIReg(rt
));
20308 t0
= newTemp(Ity_I32
);
20309 t1
= newTemp(Ity_I32
);
20310 t2
= newTemp(Ity_I32
);
20311 t3
= newTemp(Ity_I32
);
20312 t4
= newTemp(Ity_I32
);
20313 /* dst = src0 + src1
20314 if (sign(src0 ) != sign(src1 ))
20316 if (sign(dst) == sign(src0 ))
20318 we have overflow! */
20320 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
20321 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
20322 assign(t2
, unop(Iop_1Uto32
,
20324 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
20325 mkU32(0x80000000))));
20327 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
20328 assign(t4
, unop(Iop_1Uto32
,
20330 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
20331 mkU32(0x80000000))));
20333 dis_branch_compact(False
, binop(Iop_CmpEQ32
,
20334 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
20335 mkU32(0)), imm
, dres
);
20340 /* In documentation for BEQC stands rs > rt and for BOVC stands rs >= rt! */
20345 case 0x09: /* ADDIU */
20346 DIP("addiu r%u, r%u, %u", rt
, rs
, imm
);
20349 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Add32
,
20350 mkNarrowTo32(ty
, getIReg(rs
)), mkU32(extend_s_16to32(imm
))),
20353 putIReg(rt
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
20357 case 0x0A: /* SLTI */
20358 DIP("slti r%u, r%u, %u", rt
, rs
, imm
);
20361 putIReg(rt
, unop(Iop_1Uto64
, binop(Iop_CmpLT64S
, getIReg(rs
),
20362 mkU64(extend_s_16to64(imm
)))));
20364 putIReg(rt
, unop(Iop_1Uto32
, binop(Iop_CmpLT32S
, getIReg(rs
),
20365 mkU32(extend_s_16to32(imm
)))));
20369 case 0x0B: /* SLTIU */
20370 DIP("sltiu r%u, r%u, %u", rt
, rs
, imm
);
20373 putIReg(rt
, unop(Iop_1Uto64
, binop(Iop_CmpLT64U
, getIReg(rs
),
20374 mkU64(extend_s_16to64(imm
)))));
20376 putIReg(rt
, unop(Iop_1Uto32
, binop(Iop_CmpLT32U
, getIReg(rs
),
20377 mkU32(extend_s_16to32(imm
)))));
20381 case 0x0C: /* ANDI */
20382 DIP("andi r%u, r%u, %u", rt
, rs
, imm
);
20385 ALUI_PATTERN64(Iop_And64
);
20387 ALUI_PATTERN(Iop_And32
);
20392 case 0x0D: /* ORI */
20393 DIP("ori r%u, r%u, %u", rt
, rs
, imm
);
20396 ALUI_PATTERN64(Iop_Or64
);
20398 ALUI_PATTERN(Iop_Or32
);
20403 case 0x0E: /* XORI */
20404 DIP("xori r%u, r%u, %u", rt
, rs
, imm
);
20407 ALUI_PATTERN64(Iop_Xor64
);
20409 ALUI_PATTERN(Iop_Xor32
);
20414 case 0x0F: /* LUI */
20417 DIP("lui r%u, imm: 0x%x", rt
, imm
);
20420 putIReg(rt
, mkU64(extend_s_32to64(p
)));
20422 putIReg(rt
, mkU32(p
));
20425 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) { /* AUI */
20426 DIP("aui r%u, imm: 0x%x", rt
, imm
);
20429 putIReg(rt
, unop(Iop_32Sto64
,
20433 mkU64(extend_s_32to64(imm
<< 16))))));
20435 putIReg(rt
, binop(Iop_Add32
, getIReg(rs
), mkU32(imm
<< 16)));
20451 static UInt
disInstr_MIPS_WRK_10(UInt cins
, const VexArchInfo
* archinfo
,
20452 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
20453 IRStmt
** bstmt
, IRExpr
** lastn
)
20455 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
, t6
, t7
;
20456 UInt opcode
, rs
, rt
, ft
, fs
, fd
, fmt
, tf
, nd
, function
, imm
;
20457 /* Additional variables for instruction fields in DSP ASE insructions */
20459 opcode
= get_opcode(cins
);
20460 imm
= get_imm(cins
);
20468 fmt
= get_fmt(cins
);
20469 function
= get_function(cins
);
20470 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
20471 IRType tyF
= fp_mode64
? Ity_F64
: Ity_F32
;
20473 switch (opcode
& 0x0F) {
20474 case 0x01: { /* COP1 */
20475 if (fmt
== 0x3 && fd
== 0 && function
== 0) { /* MFHC1 */
20476 DIP("mfhc1 r%u, f%u", rt
, fs
);
20478 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
20479 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20481 t0
= newTemp(Ity_I64
);
20482 t1
= newTemp(Ity_I32
);
20483 assign(t0
, unop(Iop_ReinterpF64asI64
, getDReg(fs
)));
20484 assign(t1
, unop(Iop_64HIto32
, mkexpr(t0
)));
20485 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
20487 putIReg(rt
, mkWidenFrom32(ty
, unop(Iop_ReinterpF32asI32
,
20488 getFReg(fs
| 1)), True
));
20491 ILLEGAL_INSTRUCTON
;
20495 } else if (fmt
== 0x7 && fd
== 0 && function
== 0) { /* MTHC1 */
20496 DIP("mthc1 r%u, f%u", rt
, fs
);
20498 if (VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
) ||
20499 VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20501 t0
= newTemp(Ity_I64
);
20502 assign(t0
, binop(Iop_32HLto64
, mkNarrowTo32(ty
, getIReg(rt
)),
20503 unop(Iop_ReinterpF32asI32
,
20504 getLoFromF64(Ity_F64
, getDReg(fs
)))));
20505 putDReg(fs
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
20507 putFReg(fs
| 1, unop(Iop_ReinterpI32asF32
,
20508 mkNarrowTo32(ty
, getIReg(rt
))));
20511 ILLEGAL_INSTRUCTON
;
20515 } else if (fmt
== 0x8) { /* BC */
20516 /* FcConditionalCode(bc1_cc) */
20517 UInt bc1_cc
= get_bc1_cc(cins
);
20518 t1
= newTemp(Ity_I1
);
20519 t2
= newTemp(Ity_I32
);
20520 t3
= newTemp(Ity_I1
);
20522 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(bc1_cc
)));
20523 assign(t2
, IRExpr_ITE(mkexpr(t1
),
20525 binop(Iop_Shr32
, getFCSR(), mkU8(23)),
20528 binop(Iop_Shr32
, getFCSR(),
20529 mkU8(24 + bc1_cc
)),
20532 if (tf
== 1 && nd
== 0) {
20533 /* branch on true */
20534 DIP("bc1t %u, %u", bc1_cc
, imm
);
20535 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
20536 dis_branch(False
, mkexpr(t3
), imm
, bstmt
);
20538 } else if (tf
== 0 && nd
== 0) {
20539 /* branch on false */
20540 DIP("bc1f %u, %u", bc1_cc
, imm
);
20541 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
20542 dis_branch(False
, mkexpr(t3
), imm
, bstmt
);
20544 } else if (nd
== 1 && tf
== 0) {
20545 DIP("bc1fl %u, %u", bc1_cc
, imm
);
20546 *lastn
= dis_branch_likely(binop(Iop_CmpNE32
, mkexpr(t2
),
20549 } else if (nd
== 1 && tf
== 1) {
20550 DIP("bc1tl %u, %u", bc1_cc
, imm
);
20551 *lastn
= dis_branch_likely(binop(Iop_CmpEQ32
, mkexpr(t2
),
20556 } else if (fmt
>= 0x1c && has_msa
) { /* BNZ.df */
20558 t0
= newTemp(Ity_I32
);
20559 t1
= newTemp(Ity_V128
);
20560 t2
= newTemp(Ity_V128
);
20561 t3
= newTemp(Ity_V128
);
20562 assign(t1
, getWReg(ft
));
20563 assign(t2
, binop(Iop_64HLtoV128
, mkU64(0), mkU64(0)));
20566 case 0x00: { /* BNZ.B */
20567 DIP("BNZ.B w%u, %u", ft
, imm
);
20568 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
20572 case 0x01: { /* BNZ.H */
20573 DIP("BNZ.H w%u, %u", ft
, imm
);
20574 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
20578 case 0x02: { /* BNZ.W */
20579 DIP("BNZ.W w%u, %u", ft
, imm
);
20580 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
20584 case 0x03: { /* BNZ.D */
20585 DIP("BNZ.D w%u, %u", ft
, imm
);
20586 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
20594 unop(Iop_V128to32
, mkexpr(t3
)),
20595 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t3
)))),
20598 unop(Iop_V128HIto64
, mkexpr(t3
))),
20600 unop(Iop_V128HIto64
, mkexpr(t3
))))));
20602 binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20603 } else if (fmt
== 0x0F && has_msa
) { /* BNZ.V */
20604 t0
= newTemp(Ity_I32
);
20605 t1
= newTemp(Ity_V128
);
20606 assign(t1
, getWReg(ft
));
20610 unop(Iop_V128to32
, mkexpr(t1
)),
20611 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t1
)))),
20613 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t1
))),
20615 unop(Iop_V128HIto64
, mkexpr(t1
))))));
20617 binop(Iop_CmpNE32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20618 } else if (fmt
>= 0x18 && has_msa
) { /* BZ.df */
20620 t0
= newTemp(Ity_I32
);
20621 t1
= newTemp(Ity_V128
);
20622 t2
= newTemp(Ity_V128
);
20623 t3
= newTemp(Ity_V128
);
20624 assign(t1
, getWReg(ft
));
20625 assign(t2
, binop(Iop_64HLtoV128
, mkU64(0), mkU64(0)));
20628 case 0x00: { /* BZ.B */
20629 DIP("BZ.B w%u, %u", ft
, imm
);
20630 assign(t3
, binop(Iop_CmpEQ8x16
, mkexpr(t1
), mkexpr(t2
)));
20634 case 0x01: { /* BZ.H */
20635 DIP("BZ.H w%u, %u", ft
, imm
);
20636 assign(t3
, binop(Iop_CmpEQ16x8
, mkexpr(t1
), mkexpr(t2
)));
20640 case 0x02: { /* BZ.W */
20641 DIP("BZ.W w%u, %u", ft
, imm
);
20642 assign(t3
, binop(Iop_CmpEQ32x4
, mkexpr(t1
), mkexpr(t2
)));
20646 case 0x03: { /* BZ.D */
20647 DIP("BZ.D w%u, %u", ft
, imm
);
20648 assign(t3
, binop(Iop_CmpEQ64x2
, mkexpr(t1
), mkexpr(t2
)));
20656 unop(Iop_V128to32
, mkexpr(t3
)),
20657 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t3
)))),
20659 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t3
))),
20661 unop(Iop_V128HIto64
, mkexpr(t3
))))));
20663 binop(Iop_CmpNE32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20664 } else if (fmt
== 0x0B && has_msa
) { /* BZ.V */
20665 t0
= newTemp(Ity_I32
);
20666 t1
= newTemp(Ity_V128
);
20667 assign(t1
, getWReg(ft
));
20671 unop(Iop_V128to32
, mkexpr(t1
)),
20672 unop(Iop_64HIto32
, unop(Iop_V128to64
, mkexpr(t1
)))),
20674 unop(Iop_64to32
, unop(Iop_V128HIto64
, mkexpr(t1
))),
20676 unop(Iop_V128HIto64
, mkexpr(t1
))))));
20678 binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0)), imm
, bstmt
);
20679 } else if (fmt
== 0x09) { /* BC1EQZ */
20680 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20681 DIP("bc1eqz f%u, %u", ft
, imm
);
20682 t1
= newTemp(Ity_I1
);
20685 assign(t1
, binop(Iop_CmpEQ64
,
20687 unop(Iop_ReinterpF64asI64
, getDReg(ft
)),
20691 assign(t1
, binop(Iop_CmpEQ32
,
20694 unop(Iop_ReinterpF64asI64
,
20700 dis_branch(False
, mkexpr(t1
), imm
, bstmt
);
20704 } else if (fmt
== 0x0D) { /* BC1NEZ */
20705 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20706 DIP("bc1nez f%u, %u", ft
, imm
);
20707 t1
= newTemp(Ity_I1
);
20710 assign(t1
, binop(Iop_CmpNE64
,
20712 unop(Iop_ReinterpF64asI64
, getDReg(ft
)),
20716 assign(t1
, binop(Iop_CmpNE32
,
20719 unop(Iop_ReinterpF64asI64
, getDReg(ft
))),
20724 dis_branch(False
, mkexpr(t1
), imm
, bstmt
);
20726 ILLEGAL_INSTRUCTON
;
20730 if (fmt
== 0x15) { /* CMP.cond.d */
20731 Bool comparison
= True
;
20732 UInt signaling
= CMPAFD
;
20733 DIP("cmp.cond.d f%u, f%u, f%u, cond %u", fd
, fs
, ft
, function
);
20734 t0
= newTemp(Ity_I32
);
20736 /* Conditions starting with S should signal exception on QNaN inputs. */
20737 switch (function
) {
20738 case 0x08: /* SAF */
20739 signaling
= CMPSAFD
; /* fallthrough */
20741 case 0x00: /* AF */
20742 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20743 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20745 binop(Iop_I64StoF64
,
20746 get_IR_roundingmode(), mkU64(0)));
20749 case 0x09: /* SUN */
20750 signaling
= CMPSAFD
; /* fallthrough */
20752 case 0x01: /* UN */
20753 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20754 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20756 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20757 unop(Iop_ReinterpI64asF64
,
20758 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20759 binop(Iop_I64StoF64
,
20760 get_IR_roundingmode(), mkU64(0))));
20763 case 0x19: /* SOR */
20764 signaling
= CMPSAFD
; /* fallthrough */
20766 case 0x11: /* OR */
20767 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20768 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20770 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20771 binop(Iop_I64StoF64
,
20772 get_IR_roundingmode(), mkU64(0)),
20773 unop(Iop_ReinterpI64asF64
,
20774 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20777 case 0x0A: /* SEQ */
20778 signaling
= CMPSAFD
; /* fallthrough */
20780 case 0x02: /* EQ */
20781 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20782 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20784 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20785 unop(Iop_ReinterpI64asF64
,
20786 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20787 binop(Iop_I64StoF64
,
20788 get_IR_roundingmode(), mkU64(0))));
20791 case 0x1A: /* SNEQ */
20792 signaling
= CMPSAFD
; /* fallthrough */
20794 case 0x12: /* NEQ */
20795 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20796 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20798 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20799 binop(Iop_I64StoF64
,
20800 get_IR_roundingmode(), mkU64(0)),
20801 unop(Iop_ReinterpI64asF64
,
20802 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20805 case 0x0B: /* SUEQ */
20806 signaling
= CMPSAFD
; /* fallthrough */
20808 case 0x03: /* UEQ */
20809 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20810 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20812 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20813 unop(Iop_ReinterpI64asF64
,
20814 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20815 IRExpr_ITE(binop(Iop_CmpEQ32
,
20816 mkexpr(t0
), mkU32(0x45)),
20817 unop(Iop_ReinterpI64asF64
,
20818 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20819 binop(Iop_I64StoF64
,
20820 get_IR_roundingmode(),
20824 case 0x1B: /* SNEQ */
20825 signaling
= CMPSAFD
; /* fallthrough */
20827 case 0x13: /* NEQ */
20828 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20829 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20831 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20832 unop(Iop_ReinterpI64asF64
,
20833 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20834 IRExpr_ITE(binop(Iop_CmpEQ32
,
20835 mkexpr(t0
), mkU32(0x00)),
20836 unop(Iop_ReinterpI64asF64
,
20837 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20838 binop(Iop_I64StoF64
,
20839 get_IR_roundingmode(),
20843 case 0x0C: /* SLT */
20844 signaling
= CMPSAFD
; /* fallthrough */
20846 case 0x04: /* LT */
20847 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20848 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20850 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20851 unop(Iop_ReinterpI64asF64
,
20852 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20853 binop(Iop_I64StoF64
,
20854 get_IR_roundingmode(), mkU64(0))));
20857 case 0x0D: /* SULT */
20858 signaling
= CMPSAFD
; /* fallthrough */
20860 case 0x05: /* ULT */
20861 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20862 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20864 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20865 unop(Iop_ReinterpI64asF64
,
20866 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20867 IRExpr_ITE(binop(Iop_CmpEQ32
,
20868 mkexpr(t0
), mkU32(0x45)),
20869 unop(Iop_ReinterpI64asF64
,
20870 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20871 binop(Iop_I64StoF64
,
20872 get_IR_roundingmode(),
20876 case 0x0E: /* SLE */
20877 signaling
= CMPSAFD
; /* fallthrough */
20879 case 0x06: /* LE */
20880 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20881 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20883 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
20884 unop(Iop_ReinterpI64asF64
,
20885 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20886 IRExpr_ITE(binop(Iop_CmpEQ32
,
20887 mkexpr(t0
), mkU32(0x40)),
20888 unop(Iop_ReinterpI64asF64
,
20889 mkU64(0xFFFFFFFFFFFFFFFFULL
)),
20890 binop(Iop_I64StoF64
,
20891 get_IR_roundingmode(),
20895 case 0x0F: /* SULE */
20896 signaling
= CMPSAFD
; /* fallthrough */
20898 case 0x07: /* ULE */
20899 assign(t0
, binop(Iop_CmpF64
, getDReg(fs
), getDReg(ft
)));
20900 calculateFCSR(fs
, ft
, signaling
, False
, 2);
20902 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x0)),
20903 binop(Iop_I64StoF64
,
20904 get_IR_roundingmode(), mkU64(0)),
20905 unop(Iop_ReinterpI64asF64
,
20906 mkU64(0xFFFFFFFFFFFFFFFFULL
))));
20910 comparison
= False
;
20914 if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
20921 } else if (fmt
== 0x14) {
20922 Bool comparison
= True
;
20923 UInt signaling
= CMPAFS
;
20924 DIP("cmp.cond.s f%u, f%u, f%u, cond %u", fd
, fs
, ft
, function
);
20925 t0
= newTemp(Ity_I32
);
20927 /* Conditions starting with S should signal exception on QNaN inputs. */
20928 switch (function
) {
20929 case 0x08: /* SAF */
20930 signaling
= CMPSAFS
; /* fallthrough */
20932 case 0x00: /* AF */
20933 assign(t0
, binop(Iop_CmpF32
,
20934 getLoFromF64(Ity_F64
, getFReg(fs
)),
20935 getLoFromF64(Ity_F64
, getFReg(ft
))));
20936 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20938 mkWidenFromF32(tyF
,
20939 binop(Iop_I32StoF32
,
20940 get_IR_roundingmode(), mkU32(0))));
20943 case 0x09: /* SUN */
20944 signaling
= CMPSAFS
; /* fallthrough */
20946 case 0x01: /* UN */
20947 assign(t0
, binop(Iop_CmpF32
,
20948 getLoFromF64(Ity_F64
, getFReg(fs
)),
20949 getLoFromF64(Ity_F64
, getFReg(ft
))));
20950 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20952 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20953 mkWidenFromF32(tyF
,
20954 unop(Iop_ReinterpI32asF32
,
20955 mkU32(0xFFFFFFFFU
))),
20956 mkWidenFromF32(tyF
,
20957 binop(Iop_I32StoF32
,
20958 get_IR_roundingmode(),
20962 case 0x19: /* SOR */
20963 signaling
= CMPSAFS
; /* fallthrough */
20965 case 0x11: /* OR */
20966 assign(t0
, binop(Iop_CmpF32
,
20967 getLoFromF64(Ity_F64
, getFReg(fs
)),
20968 getLoFromF64(Ity_F64
, getFReg(ft
))));
20969 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20971 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x45)),
20972 mkWidenFromF32(tyF
,
20973 binop(Iop_I32StoF32
,
20974 get_IR_roundingmode(),
20976 mkWidenFromF32(tyF
,
20977 unop(Iop_ReinterpI32asF32
,
20978 mkU32(0xFFFFFFFFU
)))));
20981 case 0x0A: /* SEQ */
20982 signaling
= CMPSAFS
; /* fallthrough */
20984 case 0x02: /* EQ */
20985 assign(t0
, binop(Iop_CmpF32
,
20986 getLoFromF64(Ity_F64
, getFReg(fs
)),
20987 getLoFromF64(Ity_F64
, getFReg(ft
))));
20988 calculateFCSR(fs
, ft
, signaling
, True
, 2);
20990 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
20991 mkWidenFromF32(tyF
,
20992 unop(Iop_ReinterpI32asF32
,
20993 mkU32(0xFFFFFFFFU
))),
20994 mkWidenFromF32(tyF
,
20995 binop(Iop_I32StoF32
,
20996 get_IR_roundingmode(),
21000 case 0x1A: /* SNEQ */
21001 signaling
= CMPSAFS
; /* fallthrough */
21003 case 0x12: /* NEQ */
21004 assign(t0
, binop(Iop_CmpF32
,
21005 getLoFromF64(Ity_F64
, getFReg(fs
)),
21006 getLoFromF64(Ity_F64
, getFReg(ft
))));
21007 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21009 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
21010 mkWidenFromF32(tyF
,
21011 binop(Iop_I32StoF32
,
21012 get_IR_roundingmode(),
21014 mkWidenFromF32(tyF
,
21015 unop(Iop_ReinterpI32asF32
,
21016 mkU32(0xFFFFFFFFU
)))));
21019 case 0x0B: /* SUEQ */
21020 signaling
= CMPSAFS
; /* fallthrough */
21022 case 0x03: /* UEQ */
21023 assign(t0
, binop(Iop_CmpF32
,
21024 getLoFromF64(Ity_F64
, getFReg(fs
)),
21025 getLoFromF64(Ity_F64
, getFReg(ft
))));
21026 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21028 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x40)),
21029 mkWidenFromF32(tyF
,
21030 unop(Iop_ReinterpI32asF32
,
21031 mkU32(0xFFFFFFFFU
))),
21032 IRExpr_ITE(binop(Iop_CmpEQ32
,
21033 mkexpr(t0
), mkU32(0x45)),
21034 mkWidenFromF32(tyF
,
21035 unop(Iop_ReinterpI32asF32
,
21036 mkU32(0xFFFFFFFFU
))),
21037 mkWidenFromF32(tyF
,
21038 binop(Iop_I32StoF32
,
21039 get_IR_roundingmode(),
21043 case 0x1B: /* SNEQ */
21044 signaling
= CMPSAFS
; /* fallthrough */
21046 case 0x13: /* NEQ */
21047 assign(t0
, binop(Iop_CmpF32
,
21048 getLoFromF64(Ity_F64
, getFReg(fs
)),
21049 getLoFromF64(Ity_F64
, getFReg(ft
))));
21050 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21052 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21053 mkWidenFromF32(tyF
,
21054 unop(Iop_ReinterpI32asF32
,
21055 mkU32(0xFFFFFFFFU
))),
21056 IRExpr_ITE(binop(Iop_CmpEQ32
,
21057 mkexpr(t0
), mkU32(0x00)),
21058 mkWidenFromF32(tyF
,
21059 unop(Iop_ReinterpI32asF32
,
21060 mkU32(0xFFFFFFFFU
))),
21061 mkWidenFromF32(tyF
,
21062 binop(Iop_I32StoF32
,
21063 get_IR_roundingmode(),
21067 case 0x0C: /* SLT */
21068 signaling
= CMPSAFS
; /* fallthrough */
21070 case 0x04: /* LT */
21071 assign(t0
, binop(Iop_CmpF32
,
21072 getLoFromF64(Ity_F64
, getFReg(fs
)),
21073 getLoFromF64(Ity_F64
, getFReg(ft
))));
21074 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21076 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21077 mkWidenFromF32(tyF
,
21078 unop(Iop_ReinterpI32asF32
,
21079 mkU32(0xFFFFFFFFU
))),
21080 mkWidenFromF32(tyF
,
21081 binop(Iop_I32StoF32
,
21082 get_IR_roundingmode(),
21086 case 0x0D: /* SULT */
21087 signaling
= CMPSAFS
; /* fallthrough */
21089 case 0x05: /* ULT */
21090 assign(t0
, binop(Iop_CmpF32
,
21091 getLoFromF64(Ity_F64
, getFReg(fs
)),
21092 getLoFromF64(Ity_F64
, getFReg(ft
))));
21093 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21095 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21096 mkWidenFromF32(tyF
,
21097 unop(Iop_ReinterpI32asF32
,
21098 mkU32(0xFFFFFFFFU
))),
21099 IRExpr_ITE(binop(Iop_CmpEQ32
,
21100 mkexpr(t0
), mkU32(0x45)),
21101 mkWidenFromF32(tyF
,
21102 unop(Iop_ReinterpI32asF32
,
21103 mkU32(0xFFFFFFFFU
))),
21104 mkWidenFromF32(tyF
,
21105 binop(Iop_I32StoF32
,
21106 get_IR_roundingmode(),
21110 case 0x0E: /* SLE */
21111 signaling
= CMPSAFS
; /* fallthrough */
21113 case 0x06: /* LE */
21114 assign(t0
, binop(Iop_CmpF32
,
21115 getLoFromF64(Ity_F64
, getFReg(fs
)),
21116 getLoFromF64(Ity_F64
, getFReg(ft
))));
21117 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21119 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x01)),
21120 mkWidenFromF32(tyF
,
21121 unop(Iop_ReinterpI32asF32
,
21122 mkU32(0xFFFFFFFFU
))),
21123 IRExpr_ITE(binop(Iop_CmpEQ32
,
21124 mkexpr(t0
), mkU32(0x40)),
21125 mkWidenFromF32(tyF
,
21126 unop(Iop_ReinterpI32asF32
,
21127 mkU32(0xFFFFFFFFU
))),
21128 mkWidenFromF32(tyF
,
21129 binop(Iop_I32StoF32
,
21130 get_IR_roundingmode(),
21134 case 0x0F: /* SULE */
21135 signaling
= CMPSAFS
; /* fallthrough */
21137 case 0x07: /* ULE */
21138 assign(t0
, binop(Iop_CmpF32
,
21139 getLoFromF64(Ity_F64
, getFReg(fs
)),
21140 getLoFromF64(Ity_F64
, getFReg(ft
))));
21141 calculateFCSR(fs
, ft
, signaling
, True
, 2);
21143 IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t0
), mkU32(0x0)),
21144 mkWidenFromF32(tyF
,
21145 binop(Iop_I32StoF32
,
21146 get_IR_roundingmode(),
21148 mkWidenFromF32(tyF
,
21149 unop(Iop_ReinterpI32asF32
,
21150 mkU32(0xFFFFFFFFU
)))));
21154 comparison
= False
;
21158 if (!VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
21166 switch (function
) {
21167 case 0x04: { /* SQRT.fmt */
21169 case 0x10: { /* S */
21170 IRExpr
*rm
= get_IR_roundingmode();
21171 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_SqrtF32
, rm
,
21172 getLoFromF64(tyF
, getFReg(fs
)))));
21176 case 0x11: { /* D */
21177 IRExpr
*rm
= get_IR_roundingmode();
21178 putDReg(fd
, binop(Iop_SqrtF64
, rm
, getDReg(fs
)));
21186 break; /* SQRT.fmt */
21188 case 0x05: /* ABS.fmt */
21191 DIP("abs.s f%u, f%u", fd
, fs
);
21192 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_AbsF32
,
21193 getLoFromF64(tyF
, getFReg(fs
)))));
21197 DIP("abs.d f%u, f%u", fd
, fs
);
21198 putDReg(fd
, unop(Iop_AbsF64
, getDReg(fs
)));
21205 break; /* ABS.fmt */
21207 case 0x02: /* MUL.fmt */
21209 case 0x11: { /* D */
21210 DIP("mul.d f%u, f%u, f%u", fd
, fs
, ft
);
21211 IRExpr
*rm
= get_IR_roundingmode();
21212 putDReg(fd
, triop(Iop_MulF64
, rm
, getDReg(fs
),
21217 case 0x10: { /* S */
21218 DIP("mul.s f%u, f%u, f%u", fd
, fs
, ft
);
21219 IRExpr
*rm
= get_IR_roundingmode();
21220 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_MulF32
, rm
,
21221 getLoFromF64(tyF
, getFReg(fs
)),
21222 getLoFromF64(tyF
, getFReg(ft
)))));
21230 break; /* MUL.fmt */
21232 case 0x03: /* DIV.fmt */
21234 case 0x11: { /* D */
21235 DIP("div.d f%u, f%u, f%u", fd
, fs
, ft
);
21236 IRExpr
*rm
= get_IR_roundingmode();
21237 putDReg(fd
, triop(Iop_DivF64
, rm
, getDReg(fs
),
21242 case 0x10: { /* S */
21243 DIP("div.s f%u, f%u, f%u", fd
, fs
, ft
);
21244 calculateFCSR(fs
, ft
, DIVS
, False
, 2);
21245 IRExpr
*rm
= get_IR_roundingmode();
21246 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
, rm
,
21247 getLoFromF64(tyF
, getFReg(fs
)),
21248 getLoFromF64(tyF
, getFReg(ft
)))));
21256 break; /* DIV.fmt */
21258 case 0x01: /* SUB.fmt */
21260 case 0x11: { /* D */
21261 DIP("sub.d f%u, f%u, f%u", fd
, fs
, ft
);
21262 calculateFCSR(fs
, ft
, SUBD
, False
, 2);
21263 IRExpr
*rm
= get_IR_roundingmode();
21264 putDReg(fd
, triop(Iop_SubF64
, rm
, getDReg(fs
),
21269 case 0x10: { /* S */
21270 DIP("sub.s f%u, f%u, f%u", fd
, fs
, ft
);
21271 calculateFCSR(fs
, ft
, SUBS
, True
, 2);
21272 IRExpr
*rm
= get_IR_roundingmode();
21273 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_SubF32
, rm
,
21274 getLoFromF64(tyF
, getFReg(fs
)),
21275 getLoFromF64(tyF
, getFReg(ft
)))));
21283 break; /* SUB.fmt */
21285 case 0x06: /* MOV.fmt */
21288 DIP("mov.d f%u, f%u", fd
, fs
);
21291 putDReg(fd
, getDReg(fs
));
21293 putFReg(fd
, getFReg(fs
));
21294 putFReg(fd
+ 1, getFReg(fs
+ 1));
21300 DIP("mov.s f%u, f%u", fd
, fs
);
21301 putFReg(fd
, getFReg(fs
));
21308 break; /* MOV.fmt */
21310 case 0x07: /* NEG.fmt */
21313 DIP("neg.s f%u, f%u", fd
, fs
);
21314 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
,
21315 getLoFromF64(tyF
, getFReg(fs
)))));
21319 DIP("neg.d f%u, f%u", fd
, fs
);
21320 putDReg(fd
, unop(Iop_NegF64
, getDReg(fs
)));
21327 break; /* NEG.fmt */
21329 case 0x08: /* ROUND.L.fmt */
21332 DIP("round.l.s f%u, f%u", fd
, fs
);
21335 calculateFCSR(fs
, 0, ROUNDLS
, True
, 1);
21336 t0
= newTemp(Ity_I64
);
21338 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x0),
21339 getLoFromF64(Ity_F64
, getFReg(fs
))));
21341 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
21349 DIP("round.l.d f%u, f%u", fd
, fs
);
21352 calculateFCSR(fs
, 0, ROUNDLD
, False
, 1);
21353 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
21354 binop(Iop_F64toI64S
,
21368 break; /* ROUND.L.fmt */
21370 case 0x09: /* TRUNC.L.fmt */
21373 DIP("trunc.l.s f%u, f%u", fd
, fs
);
21376 calculateFCSR(fs
, 0, TRUNCLS
, True
, 1);
21377 t0
= newTemp(Ity_I64
);
21378 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x3),
21379 getLoFromF64(Ity_F64
, getFReg(fs
))));
21381 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
21389 DIP("trunc.l.d f%u, f%u", fd
, fs
);
21392 calculateFCSR(fs
, 0, TRUNCLD
, False
, 1);
21393 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
21394 binop(Iop_F64toI64S
,
21407 break; /* TRUNC.L.fmt */
21409 case 0x15: /* RECIP.fmt */
21411 case 0x10: { /* S */
21412 DIP("recip.s f%u, f%u", fd
, fs
);
21413 IRExpr
*rm
= get_IR_roundingmode();
21414 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
,
21415 rm
, unop(Iop_ReinterpI32asF32
,
21416 mkU32(ONE_SINGLE
)), getLoFromF64(tyF
,
21421 case 0x11: { /* D */
21422 DIP("recip.d f%u, f%u", fd
, fs
);
21423 IRExpr
*rm
= get_IR_roundingmode();
21424 /* putDReg(fd, 1.0/getDreg(fs)); */
21425 putDReg(fd
, triop(Iop_DivF64
, rm
,
21426 unop(Iop_ReinterpI64asF64
,
21427 mkU64(ONE_DOUBLE
)), getDReg(fs
)));
21436 break; /* RECIP.fmt */
21438 case 0x13: /* MOVN.fmt */
21441 DIP("movn.s f%u, f%u, r%u", fd
, fs
, rt
);
21442 t1
= newTemp(Ity_I1
);
21445 assign(t1
, binop(Iop_CmpNE64
, mkU64(0), getIReg(rt
)));
21447 assign(t1
, binop(Iop_CmpNE32
, mkU32(0), getIReg(rt
)));
21449 putFReg(fd
, IRExpr_ITE(mkexpr(t1
), getFReg(fs
), getFReg(fd
)));
21453 DIP("movn.d f%u, f%u, r%u", fd
, fs
, rt
);
21454 t1
= newTemp(Ity_I1
);
21457 assign(t1
, binop(Iop_CmpNE64
, mkU64(0), getIReg(rt
)));
21459 assign(t1
, binop(Iop_CmpNE32
, mkU32(0), getIReg(rt
)));
21461 putDReg(fd
, IRExpr_ITE(mkexpr(t1
), getDReg(fs
), getDReg(fd
)));
21468 break; /* MOVN.fmt */
21470 case 0x12: /* MOVZ.fmt */
21473 DIP("movz.s f%u, f%u, r%u", fd
, fs
, rt
);
21474 t1
= newTemp(Ity_I1
);
21477 assign(t1
, binop(Iop_CmpEQ64
, mkU64(0), getIReg(rt
)));
21479 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), getIReg(rt
)));
21481 putFReg(fd
, IRExpr_ITE(mkexpr(t1
), getFReg(fs
), getFReg(fd
)));
21485 DIP("movz.d f%u, f%u, r%u", fd
, fs
, rt
);
21486 t1
= newTemp(Ity_I1
);
21489 assign(t1
, binop(Iop_CmpEQ64
, mkU64(0), getIReg(rt
)));
21491 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), getIReg(rt
)));
21493 putDReg(fd
, IRExpr_ITE(mkexpr(t1
), getDReg(fs
), getDReg(fd
)));
21500 break; /* MOVZ.fmt */
21502 case 0x11: /* MOVT.fmt */
21504 UInt mov_cc
= get_mov_cc(cins
);
21506 switch (fmt
) { /* MOVCF = 010001 */
21508 DIP("movt.d f%u, f%u, %u", fd
, fs
, mov_cc
);
21509 t1
= newTemp(Ity_I1
);
21510 t2
= newTemp(Ity_I32
);
21511 t3
= newTemp(Ity_I1
);
21512 t4
= newTemp(Ity_F64
);
21514 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21515 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21517 binop(Iop_Shr32
, getFCSR(),
21521 binop(Iop_Shr32
, getFCSR(),
21522 mkU8(24 + mov_cc
)),
21526 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
21527 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21528 getDReg(fs
), getDReg(fd
)));
21529 putDReg(fd
, mkexpr(t4
));
21533 DIP("movt.s f%u, f%u, %u", fd
, fs
, mov_cc
);
21534 t1
= newTemp(Ity_I1
);
21535 t2
= newTemp(Ity_I32
);
21536 t3
= newTemp(Ity_I1
);
21537 t4
= newTemp(Ity_F64
);
21538 t5
= newTemp(Ity_F64
);
21539 t6
= newTemp(Ity_F64
);
21540 t7
= newTemp(Ity_I64
);
21543 assign(t5
, getFReg(fs
));
21544 assign(t6
, getFReg(fd
));
21546 assign(t5
, unop(Iop_F32toF64
, getFReg(fs
)));
21547 assign(t6
, unop(Iop_F32toF64
, getFReg(fd
)));
21550 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21551 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21553 binop(Iop_Shr32
, getFCSR(),
21557 binop(Iop_Shr32
, getFCSR(),
21558 mkU8(24 + mov_cc
)),
21562 assign(t3
, binop(Iop_CmpEQ32
, mkU32(1), mkexpr(t2
)));
21563 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21564 mkexpr(t5
), mkexpr(t6
)));
21567 IRTemp f
= newTemp(Ity_F64
);
21568 IRTemp fd_hi
= newTemp(Ity_I32
);
21569 assign(f
, getFReg(fd
));
21570 assign(fd_hi
, unop(Iop_64HIto32
,
21571 unop(Iop_ReinterpF64asI64
, mkexpr(f
))));
21572 assign(t7
, mkWidenFrom32(Ity_I64
, unop(Iop_64to32
,
21573 unop(Iop_ReinterpF64asI64
, mkexpr(t4
))),
21576 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t7
)));
21578 putFReg(fd
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21586 } else if (tf
== 0) { /* MOVF.fmt */
21587 UInt mov_cc
= get_mov_cc(cins
);
21589 switch (fmt
) { /* MOVCF = 010001 */
21591 DIP("movf.d f%u, f%u, %u", fd
, fs
, mov_cc
);
21592 t1
= newTemp(Ity_I1
);
21593 t2
= newTemp(Ity_I32
);
21594 t3
= newTemp(Ity_I1
);
21595 t4
= newTemp(Ity_F64
);
21597 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21598 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21600 binop(Iop_Shr32
, getFCSR(),
21604 binop(Iop_Shr32
, getFCSR(),
21605 mkU8(24 + mov_cc
)),
21609 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
21610 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21611 getDReg(fs
), getDReg(fd
)));
21612 putDReg(fd
, mkexpr(t4
));
21616 DIP("movf.s f%u, f%u, %u", fd
, fs
, mov_cc
);
21617 t1
= newTemp(Ity_I1
);
21618 t2
= newTemp(Ity_I32
);
21619 t3
= newTemp(Ity_I1
);
21620 t4
= newTemp(Ity_F64
);
21621 t5
= newTemp(Ity_F64
);
21622 t6
= newTemp(Ity_F64
);
21625 assign(t5
, getFReg(fs
));
21626 assign(t6
, getFReg(fd
));
21628 assign(t5
, unop(Iop_F32toF64
, getFReg(fs
)));
21629 assign(t6
, unop(Iop_F32toF64
, getFReg(fd
)));
21632 assign(t1
, binop(Iop_CmpEQ32
, mkU32(0), mkU32(mov_cc
)));
21633 assign(t2
, IRExpr_ITE(mkexpr(t1
),
21635 binop(Iop_Shr32
, getFCSR(),
21639 binop(Iop_Shr32
, getFCSR(),
21640 mkU8(24 + mov_cc
)),
21644 assign(t3
, binop(Iop_CmpEQ32
, mkU32(0), mkexpr(t2
)));
21645 assign(t4
, IRExpr_ITE(mkexpr(t3
),
21646 mkexpr(t5
), mkexpr(t6
)));
21649 IRTemp f
= newTemp(Ity_F64
);
21650 IRTemp fd_hi
= newTemp(Ity_I32
);
21651 t7
= newTemp(Ity_I64
);
21652 assign(f
, getFReg(fd
));
21653 assign(fd_hi
, unop(Iop_64HIto32
,
21654 unop(Iop_ReinterpF64asI64
, mkexpr(f
))));
21655 assign(t7
, mkWidenFrom32(Ity_I64
, unop(Iop_64to32
,
21656 unop(Iop_ReinterpF64asI64
, mkexpr(t4
))),
21659 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t7
)));
21661 putFReg(fd
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21671 break; /* MOVT.fmt */
21673 case 0x00: /* ADD.fmt */
21675 case 0x10: { /* S */
21676 DIP("add.s f%u, f%u, f%u", fd
, fs
, ft
);
21677 calculateFCSR(fs
, ft
, ADDS
, True
, 2);
21678 IRExpr
*rm
= get_IR_roundingmode();
21679 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_AddF32
, rm
,
21680 getLoFromF64(tyF
, getFReg(fs
)),
21681 getLoFromF64(tyF
, getFReg(ft
)))));
21685 case 0x11: { /* D */
21686 DIP("add.d f%u, f%u, f%u", fd
, fs
, ft
);
21687 calculateFCSR(fs
, ft
, ADDD
, False
, 2);
21688 IRExpr
*rm
= get_IR_roundingmode();
21689 putDReg(fd
, triop(Iop_AddF64
, rm
, getDReg(fs
), getDReg(ft
)));
21693 case 0x04: /* MTC1 (Move Word to Floating Point) */
21694 DIP("mtc1 r%u, f%u", rt
, fs
);
21697 t0
= newTemp(Ity_I32
);
21698 t1
= newTemp(Ity_F32
);
21699 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
21700 assign(t1
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
21702 putFReg(fs
, mkWidenFromF32(tyF
, mkexpr(t1
)));
21704 putFReg(fs
, unop(Iop_ReinterpI32asF32
,
21705 mkNarrowTo32(ty
, getIReg(rt
))));
21709 case 0x05: /* Doubleword Move to Floating Point DMTC1; MIPS64 */
21710 DIP("dmtc1 r%u, f%u", rt
, fs
);
21712 putDReg(fs
, unop(Iop_ReinterpI64asF64
, getIReg(rt
)));
21715 case 0x00: /* MFC1 */
21716 DIP("mfc1 r%u, f%u", rt
, fs
);
21719 t0
= newTemp(Ity_I64
);
21720 t1
= newTemp(Ity_I32
);
21721 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21722 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21723 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t1
), True
));
21725 putIReg(rt
, mkWidenFrom32(ty
,
21726 unop(Iop_ReinterpF32asI32
, getFReg(fs
)),
21731 case 0x01: /* Doubleword Move from Floating Point DMFC1;
21733 DIP("dmfc1 r%u, f%u", rt
, fs
);
21734 putIReg(rt
, unop(Iop_ReinterpF64asI64
, getDReg(fs
)));
21737 case 0x06: /* CTC1 */
21738 DIP("ctc1 r%u, f%u", rt
, fs
);
21739 t0
= newTemp(Ity_I32
);
21740 t1
= newTemp(Ity_I32
);
21741 t2
= newTemp(Ity_I32
);
21742 t3
= newTemp(Ity_I32
);
21743 t4
= newTemp(Ity_I32
);
21744 t5
= newTemp(Ity_I32
);
21745 t6
= newTemp(Ity_I32
);
21746 assign(t0
, mkNarrowTo32(ty
, getIReg(rt
)));
21748 if (fs
== 25) { /* FCCR */
21749 assign(t1
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21750 mkU32(0x000000FE)), mkU8(24)));
21751 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21752 mkU32(0x01000000)));
21753 assign(t3
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21754 mkU32(0x00000001)), mkU8(23)));
21755 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21756 mkU32(0x007FFFFF)));
21757 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, mkexpr(t1
),
21758 mkexpr(t2
)), binop(Iop_Or32
, mkexpr(t3
),
21760 } else if (fs
== 26) { /* FEXR */
21761 assign(t1
, binop(Iop_And32
, getFCSR(), mkU32(0xFFFC0000)));
21762 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21763 mkU32(0x0003F000)));
21764 assign(t3
, binop(Iop_And32
, getFCSR(), mkU32(0x00000F80)));
21765 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21766 mkU32(0x0000007C)));
21767 assign(t5
, binop(Iop_And32
, getFCSR(), mkU32(0x00000003)));
21768 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, binop(Iop_Or32
,
21769 mkexpr(t1
), mkexpr(t2
)), binop(Iop_Or32
,
21770 mkexpr(t3
), mkexpr(t4
))), mkexpr(t5
)));
21771 } else if (fs
== 28) {
21772 assign(t1
, binop(Iop_And32
, getFCSR(), mkU32(0xFE000000)));
21773 assign(t2
, binop(Iop_Shl32
, binop(Iop_And32
, mkexpr(t0
),
21774 mkU32(0x00000002)), mkU8(22)));
21775 assign(t3
, binop(Iop_And32
, getFCSR(), mkU32(0x00FFF000)));
21776 assign(t4
, binop(Iop_And32
, mkexpr(t0
),
21777 mkU32(0x00000F80)));
21778 assign(t5
, binop(Iop_And32
, getFCSR(), mkU32(0x0000007C)));
21779 assign(t6
, binop(Iop_And32
, mkexpr(t0
),
21780 mkU32(0x00000003)));
21781 putFCSR(binop(Iop_Or32
, binop(Iop_Or32
, binop(Iop_Or32
,
21782 mkexpr(t1
), mkexpr(t2
)), binop(Iop_Or32
,
21783 mkexpr(t3
), mkexpr(t4
))), binop(Iop_Or32
,
21784 mkexpr(t5
), mkexpr(t6
))));
21785 } else if (fs
== 31) {
21786 putFCSR(mkexpr(t0
));
21791 case 0x02: /* CFC1 */
21792 DIP("cfc1 r%u, f%u", rt
, fs
);
21793 t0
= newTemp(Ity_I32
);
21794 t1
= newTemp(Ity_I32
);
21795 t2
= newTemp(Ity_I32
);
21796 t3
= newTemp(Ity_I32
);
21797 t4
= newTemp(Ity_I32
);
21798 t5
= newTemp(Ity_I32
);
21799 t6
= newTemp(Ity_I32
);
21800 assign(t0
, getFCSR());
21803 putIReg(rt
, mkWidenFrom32(ty
,
21804 IRExpr_Get(offsetof(VexGuestMIPS32State
,
21808 } else if (fs
== 25) {
21809 assign(t1
, mkU32(0x000000FF));
21810 assign(t2
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21811 mkU32(0xFE000000)), mkU8(25)));
21812 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21813 mkU32(0x00800000)), mkU8(23)));
21814 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21815 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21816 mkexpr(t3
)), False
));
21817 } else if (fs
== 26) {
21818 assign(t1
, mkU32(0xFFFFF07C));
21819 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21820 mkU32(0x0003F000)));
21821 assign(t3
, binop(Iop_And32
, mkexpr(t0
),
21822 mkU32(0x0000007C)));
21823 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21824 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21825 mkexpr(t3
)), False
));
21826 } else if (fs
== 28) {
21827 assign(t1
, mkU32(0x00000F87));
21828 assign(t2
, binop(Iop_And32
, mkexpr(t0
),
21829 mkU32(0x00000F83)));
21830 assign(t3
, binop(Iop_Shr32
, binop(Iop_And32
, mkexpr(t0
),
21831 mkU32(0x01000000)), mkU8(22)));
21832 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
,
21833 binop(Iop_Or32
, mkexpr(t1
), mkexpr(t2
)),
21834 mkexpr(t3
)), False
));
21835 } else if (fs
== 31) {
21836 putIReg(rt
, mkWidenFrom32(ty
, getFCSR(), False
));
21847 case 0x21: /* CVT.D */
21850 DIP("cvt.d.s f%u, f%u", fd
, fs
);
21851 calculateFCSR(fs
, 0, CVTDS
, True
, 1);
21854 t0
= newTemp(Ity_I64
);
21855 t1
= newTemp(Ity_I32
);
21856 t3
= newTemp(Ity_F32
);
21857 t4
= newTemp(Ity_F32
);
21858 /* get lo half of FPR */
21859 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21861 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21863 assign(t3
, unop(Iop_ReinterpI32asF32
, mkexpr(t1
)));
21865 putFReg(fd
, unop(Iop_F32toF64
, mkexpr(t3
)));
21867 putDReg(fd
, unop(Iop_F32toF64
, getFReg(fs
)));
21872 DIP("cvt.d.w %u, %u", fd
, fs
);
21873 calculateFCSR(fs
, 0, CVTDW
, True
, 1);
21876 t0
= newTemp(Ity_I64
);
21877 t1
= newTemp(Ity_I32
);
21878 t3
= newTemp(Ity_F32
);
21879 t4
= newTemp(Ity_F32
);
21880 /* get lo half of FPR */
21881 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21883 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21884 putDReg(fd
, unop(Iop_I32StoF64
, mkexpr(t1
)));
21887 t0
= newTemp(Ity_I32
);
21888 assign(t0
, unop(Iop_ReinterpF32asI32
, getFReg(fs
)));
21889 putDReg(fd
, unop(Iop_I32StoF64
, mkexpr(t0
)));
21893 case 0x15: { /* L */
21895 DIP("cvt.d.l %u, %u", fd
, fs
);
21896 calculateFCSR(fs
, 0, CVTDL
, False
, 1);
21897 t0
= newTemp(Ity_I64
);
21898 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21900 putFReg(fd
, binop(Iop_I64StoF64
,
21901 get_IR_roundingmode(), mkexpr(t0
)));
21913 case 0x20: /* CVT.s */
21916 DIP("cvt.s.w %u, %u", fd
, fs
);
21917 calculateFCSR(fs
, 0, CVTSW
, True
, 1);
21920 t0
= newTemp(Ity_I64
);
21921 t1
= newTemp(Ity_I32
);
21922 t3
= newTemp(Ity_F32
);
21923 t4
= newTemp(Ity_F32
);
21924 /* get lo half of FPR */
21925 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21927 assign(t1
, unop(Iop_64to32
, mkexpr(t0
)));
21928 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_I32StoF32
,
21929 get_IR_roundingmode(), mkexpr(t1
))));
21931 t0
= newTemp(Ity_I32
);
21932 assign(t0
, unop(Iop_ReinterpF32asI32
, getFReg(fs
)));
21933 putFReg(fd
, binop(Iop_I32StoF32
, get_IR_roundingmode(),
21940 DIP("cvt.s.d %u, %u", fd
, fs
);
21941 calculateFCSR(fs
, 0, CVTSD
, False
, 1);
21942 t0
= newTemp(Ity_F32
);
21943 assign(t0
, binop(Iop_F64toF32
, get_IR_roundingmode(),
21945 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t0
)));
21949 DIP("cvt.s.l %u, %u", fd
, fs
);
21952 calculateFCSR(fs
, 0, CVTSL
, False
, 1);
21953 t0
= newTemp(Ity_I64
);
21954 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(fs
)));
21956 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_I64StoF32
,
21957 get_IR_roundingmode(), mkexpr(t0
))));
21970 case 0x24: /* CVT.w */
21973 DIP("cvt.w.s %u, %u", fd
, fs
);
21974 calculateFCSR(fs
, 0, CVTWS
, True
, 1);
21976 mkWidenFromF32(tyF
,
21977 unop(Iop_ReinterpI32asF32
,
21978 binop(Iop_F32toI32S
,
21979 get_IR_roundingmode(),
21985 DIP("cvt.w.d %u, %u", fd
, fs
);
21986 calculateFCSR(fs
, 0, CVTWD
, False
, 1);
21987 t0
= newTemp(Ity_I32
);
21988 t1
= newTemp(Ity_F32
);
21989 assign(t0
, binop(Iop_F64toI32S
, get_IR_roundingmode(),
21991 assign(t1
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
21992 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
22002 case 0x25: /* CVT.l */
22005 DIP("cvt.l.s %u, %u", fd
, fs
);
22008 calculateFCSR(fs
, 0, CVTLS
, True
, 1);
22009 t0
= newTemp(Ity_I64
);
22011 assign(t0
, binop(Iop_F32toI64S
, get_IR_roundingmode(),
22012 getLoFromF64(tyF
, getFReg(fs
))));
22014 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22021 case 0x11: { /* D */
22022 DIP("cvt.l.d %u, %u", fd
, fs
);
22025 calculateFCSR(fs
, 0, CVTLD
, False
, 1);
22026 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22027 binop(Iop_F64toI64S
,
22028 get_IR_roundingmode(),
22043 case 0x0B: /* FLOOR.L.fmt */
22046 DIP("floor.l.s %u, %u", fd
, fs
);
22049 calculateFCSR(fs
, 0, FLOORLS
, True
, 1);
22050 t0
= newTemp(Ity_I64
);
22052 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x1),
22053 getLoFromF64(tyF
, getFReg(fs
))));
22055 putDReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22063 DIP("floor.l.d %u, %u", fd
, fs
);
22066 calculateFCSR(fs
, 0, FLOORLD
, False
, 1);
22067 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22068 binop(Iop_F64toI64S
,
22083 case 0x0C: /* ROUND.W.fmt */
22086 DIP("round.w.s f%u, f%u", fd
, fs
);
22087 calculateFCSR(fs
, 0, ROUNDWS
, True
, 1);
22089 mkWidenFromF32(tyF
,
22090 unop(Iop_ReinterpI32asF32
,
22091 binop(Iop_F32toI32S
,
22098 DIP("round.w.d f%u, f%u", fd
, fs
);
22099 calculateFCSR(fs
, 0, ROUNDWD
, False
, 1);
22102 t0
= newTemp(Ity_I32
);
22103 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x0),
22105 putFReg(fd
, mkWidenFromF32(tyF
,
22106 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22108 t0
= newTemp(Ity_I32
);
22110 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x0),
22113 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22123 break; /* ROUND.W.fmt */
22125 case 0x0F: /* FLOOR.W.fmt */
22128 DIP("floor.w.s f%u, f%u", fd
, fs
);
22129 calculateFCSR(fs
, 0, FLOORWS
, True
, 1);
22131 mkWidenFromF32(tyF
,
22132 unop(Iop_ReinterpI32asF32
,
22133 binop(Iop_F32toI32S
,
22140 DIP("floor.w.d f%u, f%u", fd
, fs
);
22141 calculateFCSR(fs
, 0, FLOORWD
, False
, 1);
22144 t0
= newTemp(Ity_I32
);
22145 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x1),
22147 putFReg(fd
, mkWidenFromF32(tyF
,
22148 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22151 t0
= newTemp(Ity_I32
);
22153 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x1),
22156 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22165 break; /* FLOOR.W.fmt */
22167 case 0x0D: /* TRUNC.W */
22170 DIP("trunc.w.s %u, %u", fd
, fs
);
22171 calculateFCSR(fs
, 0, TRUNCWS
, True
, 1);
22173 mkWidenFromF32(tyF
,
22174 unop(Iop_ReinterpI32asF32
,
22175 binop(Iop_F32toI32S
,
22182 DIP("trunc.w.d %u, %u", fd
, fs
);
22183 calculateFCSR(fs
, 0, TRUNCWD
, False
, 1);
22186 t0
= newTemp(Ity_I32
);
22188 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x3),
22191 putFReg(fd
, mkWidenFromF32(tyF
,
22192 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22194 t0
= newTemp(Ity_I32
);
22196 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x3),
22199 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22211 case 0x0E: /* CEIL.W.fmt */
22214 DIP("ceil.w.s %u, %u", fd
, fs
);
22215 calculateFCSR(fs
, 0, CEILWS
, True
, 1);
22217 mkWidenFromF32(tyF
,
22218 unop(Iop_ReinterpI32asF32
,
22219 binop(Iop_F32toI32S
,
22226 DIP("ceil.w.d %u, %u", fd
, fs
);
22227 calculateFCSR(fs
, 0, CEILWD
, False
, 1);
22230 t0
= newTemp(Ity_I32
);
22231 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x2),
22233 putFReg(fd
, unop(Iop_ReinterpI32asF32
, mkexpr(t0
)));
22235 t0
= newTemp(Ity_I32
);
22236 assign(t0
, binop(Iop_F64toI32S
, mkU32(0x2),
22238 putFReg(fd
, mkWidenFromF32(tyF
,
22239 unop(Iop_ReinterpI32asF32
, mkexpr(t0
))));
22251 case 0x0A: /* CEIL.L.fmt */
22254 DIP("ceil.l.s %u, %u", fd
, fs
);
22257 calculateFCSR(fs
, 0, CEILLS
, True
, 1);
22258 t0
= newTemp(Ity_I64
);
22260 assign(t0
, binop(Iop_F32toI64S
, mkU32(0x2),
22261 getLoFromF64(tyF
, getFReg(fs
))));
22263 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t0
)));
22271 DIP("ceil.l.d %u, %u", fd
, fs
);
22274 calculateFCSR(fs
, 0, CEILLD
, False
, 1);
22275 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22276 binop(Iop_F64toI64S
,
22292 case 0x16: /* RSQRT.fmt */
22294 case 0x10: { /* S */
22295 DIP("rsqrt.s %u, %u", fd
, fs
);
22296 IRExpr
*rm
= get_IR_roundingmode();
22297 putFReg(fd
, mkWidenFromF32(tyF
, triop(Iop_DivF32
, rm
,
22298 unop(Iop_ReinterpI32asF32
, mkU32(ONE_SINGLE
)),
22299 binop(Iop_SqrtF32
, rm
, getLoFromF64(tyF
,
22304 case 0x11: { /* D */
22305 DIP("rsqrt.d %u, %u", fd
, fs
);
22306 IRExpr
*rm
= get_IR_roundingmode();
22307 putDReg(fd
, triop(Iop_DivF64
, rm
,
22308 unop(Iop_ReinterpI64asF64
,
22309 mkU64(ONE_DOUBLE
)),
22310 binop(Iop_SqrtF64
, rm
, getDReg(fs
))));
22321 case 0x18: /* MADDF.fmt */
22322 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22324 case 0x11: { /* D */
22325 DIP("maddf.d f%u, f%u, f%u", fd
, fs
, ft
);
22326 IRExpr
*rm
= get_IR_roundingmode();
22327 putDReg(fd
, qop(Iop_MAddF64
, rm
, getDReg(fs
), getDReg(ft
),
22332 case 0x10: { /* S */
22333 DIP("maddf.s f%u, f%u, f%u", fd
, fs
, ft
);
22334 IRExpr
*rm
= get_IR_roundingmode();
22335 t1
= newTemp(Ity_F32
);
22336 assign(t1
, qop(Iop_MAddF32
, rm
,
22337 getLoFromF64(tyF
, getFReg(fs
)),
22338 getLoFromF64(tyF
, getFReg(ft
)),
22339 getLoFromF64(tyF
, getFReg(fd
))));
22340 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
22348 ILLEGAL_INSTRUCTON
;
22353 case 0x19: /* MSUBF.fmt */
22354 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22356 case 0x11: { /* D */
22357 DIP("msubf.d f%u, f%u, f%u", fd
, fs
, ft
);
22358 IRExpr
*rm
= get_IR_roundingmode();
22359 putDReg(fd
, qop(Iop_MSubF64
, rm
, getDReg(fs
),
22360 getDReg(ft
), getDReg(fd
)));
22364 case 0x10: { /* S */
22365 DIP("msubf.s f%u, f%u, f%u", fd
, fs
, ft
);
22366 IRExpr
*rm
= get_IR_roundingmode();
22367 t1
= newTemp(Ity_F32
);
22368 assign(t1
, qop(Iop_MSubF32
, rm
,
22369 getLoFromF64(tyF
, getFReg(fs
)),
22370 getLoFromF64(tyF
, getFReg(ft
)),
22371 getLoFromF64(tyF
, getFReg(fd
))));
22372 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
22380 ILLEGAL_INSTRUCTON
;
22385 case 0x1E: /* MAX.fmt */
22386 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22388 case 0x11: { /* D */
22389 DIP("max.d f%u, f%u, f%u", fd
, fs
, ft
);
22390 calculateFCSR(fs
, ft
, MAXD
, False
, 2);
22391 putDReg(fd
, binop(Iop_MaxNumF64
, getDReg(fs
), getDReg(ft
)));
22395 case 0x10: { /* S */
22396 DIP("max.s f%u, f%u, f%u", fd
, fs
, ft
);
22397 calculateFCSR(fs
, ft
, MAXS
, True
, 2);
22398 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_MaxNumF32
,
22399 getLoFromF64(Ity_F64
,
22401 getLoFromF64(Ity_F64
,
22410 ILLEGAL_INSTRUCTON
;
22415 case 0x1C: /* MIN.fmt */
22416 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22418 case 0x11: { /* D */
22419 DIP("min.d f%u, f%u, f%u", fd
, fs
, ft
);
22420 calculateFCSR(fs
, ft
, MIND
, False
, 2);
22421 putDReg(fd
, binop(Iop_MinNumF64
, getDReg(fs
), getDReg(ft
)));
22425 case 0x10: { /* S */
22426 DIP("min.s f%u, f%u, f%u", fd
, fs
, ft
);
22427 calculateFCSR(fs
, ft
, MINS
, True
, 2);
22428 putFReg(fd
, mkWidenFromF32(tyF
, binop(Iop_MinNumF32
,
22429 getLoFromF64(Ity_F64
,
22431 getLoFromF64(Ity_F64
,
22440 ILLEGAL_INSTRUCTON
;
22445 case 0x1F: /* MAXA.fmt */
22446 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22448 case 0x11: { /* D */
22449 DIP("maxa.d f%u, f%u, f%u", fd
, fs
, ft
);
22450 calculateFCSR(fs
, ft
, MAXAD
, False
, 2);
22451 t1
= newTemp(Ity_F64
);
22452 t2
= newTemp(Ity_F64
);
22453 t3
= newTemp(Ity_F64
);
22454 t4
= newTemp(Ity_I1
);
22455 assign(t1
, unop(Iop_AbsF64
, getFReg(fs
)));
22456 assign(t2
, unop(Iop_AbsF64
, getFReg(ft
)));
22457 assign(t3
, binop(Iop_MaxNumF64
, mkexpr(t1
), mkexpr(t2
)));
22458 assign(t4
, binop(Iop_CmpEQ32
,
22459 binop(Iop_CmpF64
, mkexpr(t3
), mkexpr(t1
)),
22461 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22462 getFReg(fs
), getFReg(ft
)));
22466 case 0x10: { /* S */
22467 DIP("maxa.s f%u, f%u, f%u", fd
, fs
, ft
);
22468 calculateFCSR(fs
, ft
, MAXAS
, True
, 2);
22469 t1
= newTemp(Ity_F32
);
22470 t2
= newTemp(Ity_F32
);
22471 t3
= newTemp(Ity_F32
);
22472 t4
= newTemp(Ity_I1
);
22473 assign(t1
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22475 assign(t2
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22477 assign(t3
, binop(Iop_MaxNumF32
, mkexpr(t1
), mkexpr(t2
)));
22478 assign(t4
, binop(Iop_CmpEQ32
,
22479 binop(Iop_CmpF32
, mkexpr(t3
), mkexpr(t1
)),
22481 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22482 getFReg(fs
), getFReg(ft
)));
22491 ILLEGAL_INSTRUCTON
;
22496 case 0x1D: /* MINA.fmt */
22497 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22499 case 0x11: { /* D */
22500 DIP("mina.d f%u, f%u, f%u", fd
, fs
, ft
);
22501 calculateFCSR(fs
, ft
, MINAD
, False
, 2);
22502 t1
= newTemp(Ity_F64
);
22503 t2
= newTemp(Ity_F64
);
22504 t3
= newTemp(Ity_F64
);
22505 t4
= newTemp(Ity_I1
);
22506 assign(t1
, unop(Iop_AbsF64
, getFReg(fs
)));
22507 assign(t2
, unop(Iop_AbsF64
, getFReg(ft
)));
22508 assign(t3
, binop(Iop_MinNumF64
, mkexpr(t1
), mkexpr(t2
)));
22509 assign(t4
, binop(Iop_CmpEQ32
,
22510 binop(Iop_CmpF64
, mkexpr(t3
), mkexpr(t1
)),
22512 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22513 getFReg(fs
), getFReg(ft
)));
22517 case 0x10: { /* S */
22518 DIP("mina.s f%u, f%u, f%u", fd
, fs
, ft
);
22519 calculateFCSR(fs
, ft
, MINAS
, True
, 2);
22520 t1
= newTemp(Ity_F32
);
22521 t2
= newTemp(Ity_F32
);
22522 t3
= newTemp(Ity_F32
);
22523 t4
= newTemp(Ity_I1
);
22524 assign(t1
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22526 assign(t2
, unop(Iop_AbsF32
, getLoFromF64(Ity_F64
,
22528 assign(t3
, binop(Iop_MinNumF32
, mkexpr(t1
), mkexpr(t2
)));
22529 assign(t4
, binop(Iop_CmpEQ32
,
22530 binop(Iop_CmpF32
, mkexpr(t3
), mkexpr(t1
)),
22532 putFReg(fd
, IRExpr_ITE(mkexpr(t4
),
22533 getFReg(fs
), getFReg(ft
)));
22544 case 0x1A: /* RINT.fmt */
22547 case 0x11: { /* D */
22548 DIP("rint.d f%u, f%u", fd
, fs
);
22549 calculateFCSR(fs
, 0, RINTS
, True
, 1);
22550 IRExpr
*rm
= get_IR_roundingmode();
22551 putDReg(fd
, binop(Iop_RoundF64toInt
, rm
, getDReg(fs
)));
22555 case 0x10: { /* S */
22556 DIP("rint.s f%u, f%u", fd
, fs
);
22557 calculateFCSR(fs
, 0, RINTD
, True
, 1);
22558 IRExpr
*rm
= get_IR_roundingmode();
22560 mkWidenFromF32(tyF
,
22561 binop(Iop_RoundF32toInt
, rm
,
22575 case 0x10: /* SEL.fmt */
22577 case 0x11: { /* D */
22578 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22579 DIP("sel.d f%u, f%u, f%u", fd
, fs
, ft
);
22580 t1
= newTemp(Ity_I1
);
22583 assign(t1
, binop(Iop_CmpNE64
,
22585 unop(Iop_ReinterpF64asI64
,
22590 assign(t1
, binop(Iop_CmpNE32
,
22593 unop(Iop_ReinterpF64asI64
,
22599 putDReg(fd
, IRExpr_ITE(mkexpr(t1
),
22600 getDReg(ft
), getDReg(fs
)));
22603 ILLEGAL_INSTRUCTON
;
22609 case 0x10: { /* S */
22610 DIP("sel.s f%u, f%u, f%u", fd
, fs
, ft
);
22611 t1
= newTemp(Ity_I1
);
22612 assign(t1
, binop(Iop_CmpNE32
,
22614 unop(Iop_ReinterpF32asI32
,
22615 getLoFromF64(tyF
, getFReg(fd
))),
22618 putFReg(fd
, IRExpr_ITE( mkexpr(t1
),
22619 getFReg(ft
), getFReg(fs
)));
22629 case 0x14: /* SELEQZ.fmt */
22630 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22631 switch (fmt
) { /* SELEQZ.df */
22632 case 0x11: { /* D */
22633 DIP("seleqz.d f%u, f%u, f%u", fd
, fs
, ft
);
22634 t1
= newTemp(Ity_I1
);
22637 assign(t1
, binop(Iop_CmpNE64
,
22639 unop(Iop_ReinterpF64asI64
,
22644 assign(t1
, binop(Iop_CmpNE32
,
22647 unop(Iop_ReinterpF64asI64
,
22653 putDReg(fd
, IRExpr_ITE( mkexpr(t1
),
22654 binop(Iop_I64StoF64
,
22655 get_IR_roundingmode(), mkU64(0)),
22660 case 0x10: { /* S */
22661 DIP("seleqz.s f%u, f%u, f%u", fd
, fs
, ft
);
22662 t1
= newTemp(Ity_I1
);
22663 assign(t1
, binop(Iop_CmpNE32
,
22665 unop(Iop_ReinterpF32asI32
,
22666 getLoFromF64(tyF
, getFReg(ft
))),
22669 putFReg(fd
, IRExpr_ITE(mkexpr(t1
),
22670 mkWidenFromF32(tyF
,
22671 binop(Iop_I32StoF32
,
22672 get_IR_roundingmode(),
22682 ILLEGAL_INSTRUCTON
;
22687 case 0x17: /* SELNEZ.fmt */
22688 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22690 case 0x11: { /* D */
22691 DIP("selnez.d f%u, f%u, f%u", fd
, fs
, ft
);
22692 t1
= newTemp(Ity_I1
);
22695 assign(t1
, binop(Iop_CmpNE64
,
22697 unop(Iop_ReinterpF64asI64
,
22702 assign(t1
, binop(Iop_CmpNE32
,
22705 unop(Iop_ReinterpF64asI64
,
22711 putDReg(fd
, IRExpr_ITE( mkexpr(t1
),
22713 binop(Iop_I64StoF64
,
22714 get_IR_roundingmode(),
22719 case 0x10: { /* S */
22720 DIP("selnez.s f%u, f%u, f%u", fd
, fs
, ft
);
22721 t1
= newTemp(Ity_I1
);
22722 assign(t1
, binop(Iop_CmpNE32
,
22724 unop(Iop_ReinterpF32asI32
,
22725 getLoFromF64(tyF
, getFReg(ft
))),
22728 putFReg(fd
, IRExpr_ITE(mkexpr(t1
),
22730 mkWidenFromF32(tyF
,
22731 binop(Iop_I32StoF32
,
22732 get_IR_roundingmode(),
22743 ILLEGAL_INSTRUCTON
;
22748 case 0x1B: /* CLASS.fmt */
22749 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
22750 t0
= newTemp(Ity_I1
); // exp zero
22751 t1
= newTemp(Ity_I1
); // exp max
22752 t2
= newTemp(Ity_I1
); // sign
22753 t3
= newTemp(Ity_I1
); // first
22754 t4
= newTemp(Ity_I1
); // val not zero
22755 t5
= newTemp(Ity_I32
);
22758 case 0x11: { /* D */
22759 DIP("class.d f%u, f%u", fd
, fs
);
22760 assign(t0
, binop(Iop_CmpEQ32
,
22763 unop(Iop_ReinterpF64asI64
,
22765 mkU32(0x7ff00000)),
22767 assign(t1
, binop(Iop_CmpEQ32
,
22770 unop(Iop_ReinterpF64asI64
,
22772 mkU32(0x7ff00000)),
22773 mkU32(0x7ff00000)));
22774 assign(t2
, binop(Iop_CmpEQ32
,
22777 unop(Iop_ReinterpF64asI64
,
22779 mkU32(0x80000000)),
22780 mkU32(0x80000000)));
22781 assign(t3
, binop(Iop_CmpEQ32
,
22784 unop(Iop_ReinterpF64asI64
,
22786 mkU32(0x00080000)),
22787 mkU32(0x00080000)));
22789 if (mode64
) assign(t4
, binop(Iop_CmpNE64
,
22791 unop(Iop_ReinterpF64asI64
,
22793 mkU64(0x000fffffffffffffULL
)),
22795 else assign(t4
, binop(Iop_CmpNE32
,
22799 unop(Iop_ReinterpF64asI64
,
22801 mkU32(0x000fffff)),
22803 unop(Iop_ReinterpF64asI64
,
22807 assign(t5
, binop(Iop_Shl32
,
22808 IRExpr_ITE(mkexpr(t1
),
22809 IRExpr_ITE(mkexpr(t4
),
22810 mkU32(0), mkU32(1)),
22811 IRExpr_ITE(mkexpr(t0
),
22812 IRExpr_ITE(mkexpr(t4
),
22816 IRExpr_ITE(mkexpr(t2
), mkU8(2), mkU8(6))));
22817 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22819 IRExpr_ITE(binop(Iop_CmpNE32
,
22820 mkexpr(t5
), mkU32(0)),
22822 IRExpr_ITE(mkexpr(t3
),
22828 case 0x10: { /* S */
22829 DIP("class.s f%u, f%u", fd
, fs
);
22830 assign(t0
, binop(Iop_CmpEQ32
,
22832 unop(Iop_ReinterpF32asI32
,
22833 getLoFromF64(tyF
, getFReg(fs
))),
22834 mkU32(0x7f800000)),
22836 assign(t1
, binop(Iop_CmpEQ32
,
22838 unop(Iop_ReinterpF32asI32
,
22839 getLoFromF64(tyF
, getFReg(fs
))),
22840 mkU32(0x7f800000)),
22841 mkU32(0x7f800000)));
22842 assign(t2
, binop(Iop_CmpEQ32
,
22844 unop(Iop_ReinterpF32asI32
,
22845 getLoFromF64(tyF
, getFReg(fs
))),
22846 mkU32(0x80000000)),
22847 mkU32(0x80000000)));
22848 assign(t3
, binop(Iop_CmpEQ32
,
22850 unop(Iop_ReinterpF32asI32
,
22851 getLoFromF64(tyF
, getFReg(fs
))),
22852 mkU32(0x00400000)),
22853 mkU32(0x00400000)));
22854 assign(t4
, binop(Iop_CmpNE32
,
22856 unop(Iop_ReinterpF32asI32
,
22857 getLoFromF64(tyF
, getFReg(fs
))),
22858 mkU32(0x007fffff)),
22860 assign(t5
, binop(Iop_Shl32
,
22861 IRExpr_ITE(mkexpr(t1
),
22862 IRExpr_ITE(mkexpr(t4
),
22863 mkU32(0), mkU32(1)),
22864 IRExpr_ITE(mkexpr(t0
),
22865 IRExpr_ITE(mkexpr(t4
),
22867 mkU32(0x8)), //zero or subnorm
22869 IRExpr_ITE(mkexpr(t2
), mkU8(2), mkU8(6))));
22870 putDReg(fd
, unop(Iop_ReinterpI64asF64
,
22872 IRExpr_ITE(binop(Iop_CmpNE32
,
22873 mkexpr(t5
), mkU32(0)),
22875 IRExpr_ITE(mkexpr(t3
),
22885 ILLEGAL_INSTRUCTON
;
22891 if (dis_instr_CCondFmt(cins
))
22902 case 0x03: /* COP1X */
22903 switch (function
) {
22904 case 0x0: { /* LWXC1 */
22905 /* Load Word Indexed to Floating Point - LWXC1 (MIPS32r2) */
22906 DIP("lwxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22908 assign(t2
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22912 t0
= newTemp(Ity_I64
);
22913 t1
= newTemp(Ity_I32
);
22914 t3
= newTemp(Ity_F32
);
22915 t4
= newTemp(Ity_I64
);
22916 assign(t3
, load(Ity_F32
, mkexpr(t2
)));
22918 assign(t4
, mkWidenFrom32(Ity_I64
, unop(Iop_ReinterpF32asI32
,
22919 mkexpr(t3
)), True
));
22921 putFReg(fd
, unop(Iop_ReinterpI64asF64
, mkexpr(t4
)));
22923 putFReg(fd
, load(Ity_F32
, mkexpr(t2
)));
22929 case 0x1: { /* LDXC1 */
22930 /* Load Doubleword Indexed to Floating Point
22931 LDXC1 (MIPS32r2 and MIPS64) */
22932 DIP("ldxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22934 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22936 putDReg(fd
, load(Ity_F64
, mkexpr(t0
)));
22940 case 0x5: /* Load Doubleword Indexed Unaligned to Floating Point - LUXC1;
22941 MIPS32r2 and MIPS64 */
22942 DIP("luxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22944 if ((mode64
|| VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
))
22948 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
,
22949 getIReg(rs
), getIReg(rt
)));
22950 assign(t1
, binop(mode64
? Iop_And64
: Iop_And32
,
22952 mode64
? mkU64(0xfffffffffffffff8ULL
)
22953 : mkU32(0xfffffff8ULL
)));
22954 putFReg(fd
, load(Ity_F64
, mkexpr(t1
)));
22961 case 0x8: { /* Store Word Indexed from Floating Point - SWXC1 */
22962 DIP("swxc1 f%u, r%u(r%u)", ft
, rt
, rs
);
22964 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22968 store(mkexpr(t0
), getLoFromF64(tyF
, getFReg(fs
)));
22970 store(mkexpr(t0
), getFReg(fs
));
22976 case 0x9: { /* Store Doubleword Indexed from Floating Point - SDXC1 */
22977 DIP("sdxc1 f%u, r%u(r%u)", fs
, rt
, rs
);
22979 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
, getIReg(rs
),
22981 store(mkexpr(t0
), getDReg(fs
));
22985 case 0xD: /* Store Doubleword Indexed Unaligned from Floating Point -
22986 SUXC1; MIPS64 MIPS32r2 */
22987 DIP("suxc1 f%u, r%u(r%u)", fd
, rt
, rs
);
22989 if ((mode64
|| VEX_MIPS_CPU_HAS_MIPS32R2(archinfo
->hwcaps
))
22993 assign(t0
, binop(mode64
? Iop_Add64
: Iop_Add32
,
22994 getIReg(rs
), getIReg(rt
)));
22995 assign(t1
, binop(mode64
? Iop_And64
: Iop_And32
,
22997 mode64
? mkU64(0xfffffffffffffff8ULL
)
22998 : mkU32(0xfffffff8ULL
)));
22999 store(mkexpr(t1
), getFReg(fs
));
23011 case 0x20: { /* MADD.S */
23012 DIP("madd.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23013 IRExpr
*rm
= get_IR_roundingmode();
23014 t1
= newTemp(Ity_F32
);
23015 assign(t1
, triop(Iop_AddF32
, rm
, getLoFromF64(tyF
, getFReg(fmt
)),
23016 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23017 getLoFromF64(tyF
, getFReg(ft
)))));
23018 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
23019 break; /* MADD.S */
23022 case 0x21: { /* MADD.D */
23023 DIP("madd.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23024 IRExpr
*rm
= get_IR_roundingmode();
23025 putDReg(fd
, triop(Iop_AddF64
, rm
, getDReg(fmt
),
23026 triop(Iop_MulF64
, rm
, getDReg(fs
),
23028 break; /* MADD.D */
23031 case 0x28: { /* MSUB.S */
23032 DIP("msub.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23033 IRExpr
*rm
= get_IR_roundingmode();
23034 t1
= newTemp(Ity_F32
);
23035 assign(t1
, triop(Iop_SubF32
, rm
,
23036 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23037 getLoFromF64(tyF
, getFReg(ft
))),
23038 getLoFromF64(tyF
, getFReg(fmt
))));
23039 putFReg(fd
, mkWidenFromF32(tyF
, mkexpr(t1
)));
23040 break; /* MSUB.S */
23043 case 0x29: { /* MSUB.D */
23044 DIP("msub.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23045 IRExpr
*rm
= get_IR_roundingmode();
23046 putDReg(fd
, triop(Iop_SubF64
, rm
, triop(Iop_MulF64
, rm
, getDReg(fs
),
23047 getDReg(ft
)), getDReg(fmt
)));
23048 break; /* MSUB.D */
23051 case 0x30: { /* NMADD.S */
23052 DIP("nmadd.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23053 IRExpr
*rm
= get_IR_roundingmode();
23054 t1
= newTemp(Ity_F32
);
23055 assign(t1
, triop(Iop_AddF32
, rm
, getLoFromF64(tyF
, getFReg(fmt
)),
23056 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23057 getLoFromF64(tyF
, getFReg(ft
)))));
23058 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
, mkexpr(t1
))));
23059 break; /* NMADD.S */
23062 case 0x31: { /* NMADD.D */
23063 DIP("nmadd.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23064 IRExpr
*rm
= get_IR_roundingmode();
23065 t1
= newTemp(Ity_F64
);
23066 assign(t1
, triop(Iop_AddF64
, rm
, getDReg(fmt
),
23067 triop(Iop_MulF64
, rm
, getDReg(fs
),
23069 putDReg(fd
, unop(Iop_NegF64
, mkexpr(t1
)));
23070 break; /* NMADD.D */
23073 case 0x38: { /* NMSUBB.S */
23074 DIP("nmsub.s f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23075 IRExpr
*rm
= get_IR_roundingmode();
23076 t1
= newTemp(Ity_F32
);
23077 assign(t1
, triop(Iop_SubF32
, rm
,
23078 triop(Iop_MulF32
, rm
, getLoFromF64(tyF
, getFReg(fs
)),
23079 getLoFromF64(tyF
, getFReg(ft
))),
23080 getLoFromF64(tyF
, getFReg(fmt
))));
23081 putFReg(fd
, mkWidenFromF32(tyF
, unop(Iop_NegF32
, mkexpr(t1
))));
23082 break; /* NMSUBB.S */
23085 case 0x39: { /* NMSUBB.D */
23086 DIP("nmsub.d f%u, f%u, f%u, f%u", fd
, fmt
, fs
, ft
);
23087 IRExpr
*rm
= get_IR_roundingmode();
23088 t1
= newTemp(Ity_F64
);
23089 assign(t1
, triop(Iop_SubF64
, rm
, triop(Iop_MulF64
, rm
, getDReg(fs
),
23090 getDReg(ft
)), getDReg(fmt
)));
23091 putDReg(fd
, unop(Iop_NegF64
, mkexpr(t1
)));
23092 break; /* NMSUBB.D */
23101 case 0x04: /* BEQL */
23102 DIP("beql r%u, r%u, %u", rs
, rt
, imm
);
23103 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
23104 getIReg(rs
), getIReg(rt
)), imm
);
23107 case 0x05: /* BNEL */
23108 DIP("bnel r%u, r%u, %u", rs
, rt
, imm
);
23109 *lastn
= dis_branch_likely(binop(mode64
? Iop_CmpEQ64
: Iop_CmpEQ32
,
23110 getIReg(rs
), getIReg(rt
)), imm
);
23113 case 0x06: /* 0x16 ??? BLEZL, BLEZC, BGEZC, BGEC */
23114 if (rt
== 0) { /* BLEZL */
23115 DIP("blezl r%u, %u", rs
, imm
);
23116 *lastn
= dis_branch_likely(unop(Iop_Not1
, (binop(mode64
? Iop_CmpLE64S
:
23117 Iop_CmpLE32S
, getIReg(rs
), mode64
?
23118 mkU64(0x0) : mkU32(0x0)))), imm
);
23119 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23120 if (rs
== 0) { /* BLEZC */
23121 DIP("blezc r%u, %u", rt
, imm
);
23124 dis_branch_compact(False
,
23125 binop(Iop_CmpLE64S
, getIReg(rt
), mkU64(0x0)),
23128 dis_branch_compact(False
,
23129 binop(Iop_CmpLE32S
, getIReg(rt
), mkU32(0x0)),
23132 } else if (rt
== rs
) { /* BGEZC */
23133 DIP("bgezc r%u, %u", rt
, imm
);
23136 dis_branch_compact(False
,
23137 binop(Iop_CmpLE64S
, mkU64(0x0), getIReg(rt
)),
23140 dis_branch_compact(False
,
23141 binop(Iop_CmpLE32S
, mkU32(0x0), getIReg(rt
)),
23144 } else { /* BGEC */
23145 DIP("bgec r%u, r%u, %u", rs
, rt
, imm
);
23148 dis_branch_compact(False
,
23149 binop(Iop_CmpLE64S
, getIReg(rt
), getIReg(rs
)),
23152 dis_branch_compact(False
,
23153 binop(Iop_CmpLE32S
, getIReg(rt
), getIReg(rs
)),
23163 case 0x07: /* BGTZL, BGTZC, BLTZC, BLTC */
23164 if (rt
== 0) { /* BGTZL */
23165 DIP("bgtzl r%u, %u", rs
, imm
);
23168 *lastn
= dis_branch_likely(binop(Iop_CmpLE64S
, getIReg(rs
),
23169 mkU64(0x00)), imm
);
23171 *lastn
= dis_branch_likely(binop(Iop_CmpLE32S
, getIReg(rs
),
23172 mkU32(0x00)), imm
);
23173 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23174 if (rs
== 0) { /* BGTZC */
23175 DIP("bgtzc r%u, %u", rt
, imm
);
23178 dis_branch_compact(False
,
23180 binop(Iop_CmpLE64S
,
23181 getIReg(rt
), mkU64(0x0))),
23184 dis_branch_compact(False
,
23186 binop(Iop_CmpLE32S
,
23187 getIReg(rt
), mkU32(0x0))),
23190 } else if (rs
== rt
) { /* BLTZC */
23191 DIP("bltzc r%u, %u", rt
, imm
);
23194 dis_branch_compact(False
,
23196 binop(Iop_CmpLE64S
,
23197 mkU64(0x0), getIReg(rt
))),
23200 dis_branch_compact(False
,
23202 binop(Iop_CmpLE32S
,
23203 mkU32(0x0), getIReg(rt
))),
23206 } else { /* BLTC */
23207 DIP("bltc r%u, r%u, %u", rs
, rt
, imm
);
23210 dis_branch_compact(False
,
23212 binop(Iop_CmpLE64S
,
23213 getIReg(rt
), getIReg(rs
))),
23216 dis_branch_compact(False
,
23218 binop(Iop_CmpLE32S
,
23219 getIReg(rt
), getIReg(rs
))),
23229 #if defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev < 6))
23231 case 0x08: { /* Doubleword Add Immidiate - DADDI; MIPS64 */
23232 DIP("daddi r%u, r%u, %u", rt
, rs
, imm
);
23233 IRTemp tmpRs64
= newTemp(Ity_I64
);
23234 assign(tmpRs64
, getIReg(rs
));
23236 t0
= newTemp(Ity_I64
);
23237 t1
= newTemp(Ity_I64
);
23238 t2
= newTemp(Ity_I64
);
23239 t3
= newTemp(Ity_I64
);
23240 t4
= newTemp(Ity_I64
);
23241 /* dst = src0 + sign(imm)
23242 if(sign(src0 ) != sign(imm ))
23244 if(sign(dst) == sign(src0 ))
23246 we have overflow! */
23248 assign(t0
, binop(Iop_Add64
, mkexpr(tmpRs64
),
23249 mkU64(extend_s_16to64(imm
))));
23250 assign(t1
, binop(Iop_Xor64
, mkexpr(tmpRs64
),
23251 mkU64(extend_s_16to64(imm
))));
23252 assign(t2
, unop(Iop_1Sto64
, binop(Iop_CmpEQ64
, binop(Iop_And64
,
23253 mkexpr(t1
), mkU64(0x8000000000000000ULL
)),
23254 mkU64(0x8000000000000000ULL
))));
23256 assign(t3
, binop(Iop_Xor64
, mkexpr(t0
), mkexpr(tmpRs64
)));
23257 assign(t4
, unop(Iop_1Sto64
, binop(Iop_CmpNE64
, binop(Iop_And64
,
23258 mkexpr(t3
), mkU64(0x8000000000000000ULL
)),
23259 mkU64(0x8000000000000000ULL
))));
23261 stmt(IRStmt_Exit(binop(Iop_CmpEQ64
, binop(Iop_Or64
, mkexpr(t2
),
23262 mkexpr(t4
)), mkU64(0)), Ijk_SigFPE_IntOvf
,
23263 IRConst_U64(guest_PC_curr_instr
+ 4),
23266 putIReg(rt
, mkexpr(t0
));
23270 #elif defined(__mips__) && ((defined(__mips_isa_rev) && __mips_isa_rev >= 6))
23272 case 0x08: { /* BNEZALC, BNEC, BNVC */
23273 if (rs
== 0) { /* BNEZALC */
23274 DIP("bnezalc r%u, %u", rt
, imm
);
23277 dis_branch_compact(True
,
23279 binop(Iop_CmpEQ64
, getIReg(rt
), mkU64(0x0))),
23282 dis_branch_compact(True
,
23284 binop(Iop_CmpEQ32
, getIReg(rt
), mkU32(0x0))),
23287 } else if (rs
< rt
) { /* BNEC */
23288 DIP("bnec r%u, %u", rt
, imm
);
23291 dis_branch_compact(False
,
23294 getIReg(rt
), getIReg(rs
))),
23297 dis_branch_compact(False
,
23300 getIReg(rt
), getIReg(rs
))),
23303 } else { /* BNVC */
23304 DIP("bnvc r%u, r%u, %u", rs
, rt
, imm
);
23307 t0
= newTemp(Ity_I32
);
23308 t1
= newTemp(Ity_I32
);
23309 t2
= newTemp(Ity_I32
);
23310 t3
= newTemp(Ity_I32
);
23311 assign(t0
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23313 mkU64(0xffffffff80000000ULL
)),
23315 IRExpr_ITE(binop(Iop_CmpLT64S
,
23317 mkU64(0x7FFFFFFFULL
)),
23318 mkU32(0), mkU32(1))));
23319 assign(t1
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23321 mkU64(0xffffffff80000000ULL
)),
23323 IRExpr_ITE(binop(Iop_CmpLT64S
,
23325 mkU64(0x7FFFFFFFULL
)),
23326 mkU32(0), mkU32(1))));
23327 assign(t2
, IRExpr_ITE(binop(Iop_CmpLT64S
,
23329 getIReg(rt
), getIReg(rs
)),
23330 mkU64(0xffffffff80000000ULL
)),
23332 IRExpr_ITE(binop(Iop_CmpLT64S
,
23336 mkU64(0x7FFFFFFFULL
)),
23337 mkU32(0), mkU32(1))));
23338 assign(t3
, binop(Iop_Add32
,
23340 binop(Iop_Add32
, mkexpr(t1
), mkexpr(t2
))));
23341 dis_branch_compact(False
,
23342 binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0)),
23345 IRTemp tmpRs32
= newTemp(Ity_I32
);
23346 IRTemp tmpRt32
= newTemp(Ity_I32
);
23348 assign(tmpRs32
, getIReg(rs
));
23349 assign(tmpRt32
, getIReg(rt
));
23350 t0
= newTemp(Ity_I32
);
23351 t1
= newTemp(Ity_I32
);
23352 t2
= newTemp(Ity_I32
);
23353 t3
= newTemp(Ity_I32
);
23354 t4
= newTemp(Ity_I32
);
23355 /* dst = src0 + src1
23356 if (sign(src0 ) != sign(src1 ))
23358 if (sign(dst) == sign(src0 ))
23360 we have overflow! */
23362 assign(t0
, binop(Iop_Add32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
23363 assign(t1
, binop(Iop_Xor32
, mkexpr(tmpRs32
), mkexpr(tmpRt32
)));
23364 assign(t2
, unop(Iop_1Uto32
,
23366 binop(Iop_And32
, mkexpr(t1
), mkU32(0x80000000)),
23367 mkU32(0x80000000))));
23369 assign(t3
, binop(Iop_Xor32
, mkexpr(t0
), mkexpr(tmpRs32
)));
23370 assign(t4
, unop(Iop_1Uto32
,
23372 binop(Iop_And32
, mkexpr(t3
), mkU32(0x80000000)),
23373 mkU32(0x80000000))));
23375 dis_branch_compact(False
, binop(Iop_CmpNE32
,
23376 binop(Iop_Or32
, mkexpr(t2
), mkexpr(t4
)),
23377 mkU32(0)), imm
, dres
);
23386 case 0x09: /* Doubleword Add Immidiate Unsigned - DADDIU; MIPS64 */
23387 DIP("daddiu r%u, r%u, %u", rt
, rs
, imm
);
23388 putIReg(rt
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23391 case 0x0A: { /* LDL */
23392 /* Load Doubleword Left - LDL; MIPS64 */
23394 DIP("ldl r%u, %u(r%u)", rt
, imm
, rs
);
23396 #if defined (_MIPSEL)
23397 t1
= newTemp(Ity_I64
);
23398 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23399 #elif defined (_MIPSEB)
23400 t1
= newTemp(Ity_I64
);
23401 assign(t1
, binop(Iop_Xor64
, mkU64(0x7), binop(Iop_Add64
, getIReg(rs
),
23402 mkU64(extend_s_16to64(imm
)))));
23404 /* t2 = word addr */
23405 /* t4 = addr mod 8 */
23406 LWX_SWX_PATTERN64_1
;
23408 /* t3 = word content - shifted */
23409 t3
= newTemp(Ity_I64
);
23410 assign(t3
, binop(Iop_Shl64
, load(Ity_I64
, mkexpr(t2
)),
23411 narrowTo(Ity_I8
, binop(Iop_Shl64
, binop(Iop_Sub64
, mkU64(0x07),
23412 mkexpr(t4
)), mkU8(3)))));
23414 /* rt content - adjusted */
23415 t5
= newTemp(Ity_I64
);
23416 t6
= newTemp(Ity_I64
);
23417 t7
= newTemp(Ity_I64
);
23419 assign(t5
, binop(Iop_Mul64
, mkexpr(t4
), mkU64(0x8)));
23421 assign(t6
, binop(Iop_Shr64
, mkU64(0x00FFFFFFFFFFFFFFULL
),
23422 narrowTo(Ity_I8
, mkexpr(t5
))));
23424 assign(t7
, binop(Iop_And64
, getIReg(rt
), mkexpr(t6
)));
23426 putIReg(rt
, binop(Iop_Or64
, mkexpr(t7
), mkexpr(t3
)));
23430 case 0x0B: { /* LDR */
23431 /* Load Doubleword Right - LDR; MIPS64 */
23433 DIP("ldr r%u,%u(r%u)", rt
, imm
, rs
);
23435 #if defined (_MIPSEL)
23436 t1
= newTemp(Ity_I64
);
23437 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23438 #elif defined (_MIPSEB)
23439 t1
= newTemp(Ity_I64
);
23440 assign(t1
, binop(Iop_Xor64
, mkU64(0x7), binop(Iop_Add64
, getIReg(rs
),
23441 mkU64(extend_s_16to64(imm
)))));
23443 /* t2 = word addr */
23444 /* t4 = addr mod 8 */
23445 LWX_SWX_PATTERN64_1
;
23447 /* t3 = word content - shifted */
23448 t3
= newTemp(Ity_I64
);
23449 assign(t3
, binop(Iop_Shr64
, load(Ity_I64
, mkexpr(t2
)),
23450 narrowTo(Ity_I8
, binop(Iop_Shl64
, mkexpr(t4
), mkU8(3)))));
23452 /* rt content - adjusted */
23453 t5
= newTemp(Ity_I64
);
23454 assign(t5
, binop(Iop_And64
, getIReg(rt
), unop(Iop_Not64
,
23455 binop(Iop_Shr64
, mkU64(0xFFFFFFFFFFFFFFFFULL
),
23456 narrowTo(Ity_I8
, binop(Iop_Shl64
, mkexpr(t4
), mkU8(0x3)))))));
23458 putIReg(rt
, binop(Iop_Or64
, mkexpr(t5
), mkexpr(t3
)));
23462 case 0x0C: /* Special2 */
23463 return disInstr_MIPS_WRK_Special2(cins
, archinfo
, abiinfo
,
23464 dres
, bstmt
, lastn
);
23466 case 0x0D: /* DAUI */
23467 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
23468 DIP("daui r%u, r%u, %x", rt
, rs
, imm
);
23469 putIReg(rt
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_32to64(imm
<< 16))));
23476 case 0x0E: /* MIPS MSA (SIMD) */
23478 Int retVal
= disMSAInstr_MIPS_WRK(cins
);
23482 } else if (retVal
== -2) {
23488 vex_printf("Error occured while trying to decode MIPS MSA "
23489 "instruction.\nYour platform probably doesn't support "
23490 "MIPS MSA (SIMD) ASE.\n");
23493 case 0x0F: /* Special3 */
23494 return disInstr_MIPS_WRK_Special3(cins
, archinfo
, abiinfo
,
23495 dres
, bstmt
, lastn
);
23504 static UInt
disInstr_MIPS_WRK_20(UInt cins
)
23506 IRTemp t1
= 0, t2
, t3
, t4
, t5
;
23507 UInt opcode
, rs
, rt
, imm
;
23509 opcode
= get_opcode(cins
);
23510 imm
= get_imm(cins
);
23513 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
23515 switch (opcode
& 0x0F) {
23516 case 0x00: /* LB */
23517 DIP("lb r%u, %u(r%u)", rt
, imm
, rs
);
23518 LOAD_STORE_PATTERN
;
23521 putIReg(rt
, unop(Iop_8Sto64
, load(Ity_I8
, mkexpr(t1
))));
23523 putIReg(rt
, unop(Iop_8Sto32
, load(Ity_I8
, mkexpr(t1
))));
23527 case 0x01: /* LH */
23528 DIP("lh r%u, %u(r%u)", rt
, imm
, rs
);
23529 LOAD_STORE_PATTERN
;
23532 putIReg(rt
, unop(Iop_16Sto64
, load(Ity_I16
, mkexpr(t1
))));
23534 putIReg(rt
, unop(Iop_16Sto32
, load(Ity_I16
, mkexpr(t1
))));
23538 case 0x02: /* LWL */
23539 DIP("lwl r%u, %u(r%u)", rt
, imm
, rs
);
23543 t1
= newTemp(Ity_I64
);
23544 #if defined (_MIPSEL)
23545 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23546 #elif defined (_MIPSEB)
23547 assign(t1
, binop(Iop_Xor64
,
23551 mkU64(extend_s_16to64(imm
)))));
23553 /* t2 = word addr */
23554 /* t4 = addr mod 4 */
23557 /* t3 = word content - shifted */
23558 t3
= newTemp(Ity_I32
);
23559 assign(t3
, binop(Iop_Shl32
,
23560 load(Ity_I32
, mkexpr(t2
)),
23568 /* rt content - adjusted */
23569 t5
= newTemp(Ity_I32
);
23570 assign(t5
, binop(Iop_And32
,
23571 mkNarrowTo32(ty
, getIReg(rt
)),
23574 narrowTo(Ity_I8
, binop(Iop_Mul32
,
23578 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t5
),
23579 mkexpr(t3
)), True
));
23582 t1
= newTemp(Ity_I32
);
23583 #if defined (_MIPSEL)
23584 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23585 #elif defined (_MIPSEB)
23586 assign(t1
, binop(Iop_Xor32
, mkU32(0x3), binop(Iop_Add32
, getIReg(rs
),
23587 mkU32(extend_s_16to32(imm
)))));
23590 /* t2 = word addr */
23591 /* t4 = addr mod 4 */
23594 /* t3 = word content - shifted */
23595 t3
= newTemp(Ity_I32
);
23596 assign(t3
, binop(Iop_Shl32
, load(Ity_I32
, mkexpr(t2
)), narrowTo(Ity_I8
,
23597 binop(Iop_Shl32
, binop(Iop_Sub32
, mkU32(0x03), mkexpr(t4
)),
23600 /* rt content - adjusted */
23601 t5
= newTemp(Ity_I32
);
23602 assign(t5
, binop(Iop_And32
,
23606 narrowTo(Ity_I8
, binop(Iop_Mul32
,
23610 putIReg(rt
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t3
)));
23615 case 0x03: /* LW */
23616 DIP("lw r%u, %u(r%u)", rt
, imm
, rs
);
23617 LOAD_STORE_PATTERN
;
23618 putIReg(rt
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
23621 case 0x04: /* LBU */
23622 DIP("lbu r%u, %u(r%u)", rt
, imm
, rs
);
23623 LOAD_STORE_PATTERN
;
23626 putIReg(rt
, unop(Iop_8Uto64
, load(Ity_I8
, mkexpr(t1
))));
23628 putIReg(rt
, unop(Iop_8Uto32
, load(Ity_I8
, mkexpr(t1
))));
23632 case 0x05: /* LHU */
23633 DIP("lhu r%u, %u(r%u)", rt
, imm
, rs
);
23634 LOAD_STORE_PATTERN
;
23637 putIReg(rt
, unop(Iop_16Uto64
, load(Ity_I16
, mkexpr(t1
))));
23639 putIReg(rt
, unop(Iop_16Uto32
, load(Ity_I16
, mkexpr(t1
))));
23643 case 0x06: /* LWR */
23644 DIP("lwr r%u, %u(r%u)", rt
, imm
, rs
);
23648 t1
= newTemp(Ity_I64
);
23650 #if defined (_MIPSEL)
23651 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23652 #elif defined (_MIPSEB)
23653 assign(t1
, binop(Iop_Xor64
,
23657 mkU64(extend_s_16to64(imm
)))));
23659 /* t2 = word addr */
23660 /* t4 = addr mod 4 */
23663 /* t3 = word content - shifted */
23664 t3
= newTemp(Ity_I32
);
23665 assign(t3
, binop(Iop_Shr32
,
23666 load(Ity_I32
, mkexpr(t2
)),
23668 binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x03)))));
23670 /* rt content - adjusted */
23671 t5
= newTemp(Ity_I32
);
23672 assign(t5
, binop(Iop_And32
, mkNarrowTo32(ty
, getIReg(rt
)),
23673 unop(Iop_Not32
, binop(Iop_Shr32
, mkU32(0xFFFFFFFF),
23674 narrowTo(Ity_I8
, binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x3)))))));
23676 putIReg(rt
, mkWidenFrom32(ty
, binop(Iop_Or32
, mkexpr(t5
),
23677 mkexpr(t3
)), True
));
23681 t1
= newTemp(Ity_I32
);
23682 #if defined (_MIPSEL)
23683 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23684 #elif defined (_MIPSEB)
23685 assign(t1
, binop(Iop_Xor32
, mkU32(0x3), binop(Iop_Add32
, getIReg(rs
),
23686 mkU32(extend_s_16to32(imm
)))));
23689 /* t2 = word addr */
23690 /* t4 = addr mod 4 */
23693 /* t3 = word content - shifted */
23694 t3
= newTemp(Ity_I32
);
23695 assign(t3
, binop(Iop_Shr32
, load(Ity_I32
, mkexpr(t2
)),
23696 narrowTo(Ity_I8
, binop(Iop_Shl32
, mkexpr(t4
),
23699 /* rt content - adjusted */
23700 t5
= newTemp(Ity_I32
);
23701 assign(t5
, binop(Iop_And32
, getIReg(rt
), unop(Iop_Not32
,
23702 binop(Iop_Shr32
, mkU32(0xFFFFFFFF), narrowTo(Ity_I8
,
23703 binop(Iop_Shl32
, mkexpr(t4
), mkU8(0x3)))))));
23705 putIReg(rt
, binop(Iop_Or32
, mkexpr(t5
), mkexpr(t3
)));
23710 case 0x07: /* Load Word unsigned - LWU; MIPS64 */
23711 DIP("lwu r%u,%u(r%u)", rt
, imm
, rs
);
23712 LOAD_STORE_PATTERN
;
23714 putIReg(rt
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), False
));
23717 case 0x08: /* SB */
23718 DIP("sb r%u, %u(r%u)", rt
, imm
, rs
);
23719 LOAD_STORE_PATTERN
;
23720 store(mkexpr(t1
), narrowTo(Ity_I8
, getIReg(rt
)));
23723 case 0x09: /* SH */
23724 DIP("sh r%u, %u(r%u)", rt
, imm
, rs
);
23725 LOAD_STORE_PATTERN
;
23726 store(mkexpr(t1
), narrowTo(Ity_I16
, getIReg(rt
)));
23729 case 0x0A: /* SWL */
23730 DIP("swl r%u, %u(r%u)", rt
, imm
, rs
);
23733 IRTemp E_byte
= newTemp(Ity_I8
);
23734 IRTemp F_byte
= newTemp(Ity_I8
);
23735 IRTemp G_byte
= newTemp(Ity_I8
);
23736 IRTemp H_byte
= newTemp(Ity_I8
);
23737 IRTemp F_pos
= newTemp(Ity_I64
);
23738 IRTemp G_pos
= newTemp(Ity_I64
);
23741 assign(H_byte
, getByteFromReg(rt
, 0));
23743 assign(G_byte
, getByteFromReg(rt
, 1));
23745 assign(F_byte
, getByteFromReg(rt
, 2));
23747 assign(E_byte
, getByteFromReg(rt
, 3));
23750 t1
= newTemp(Ity_I64
);
23751 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23753 /* t2 = word addr */
23754 t2
= newTemp(Ity_I64
);
23755 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFFCULL
)));
23757 /* t3 = addr mod 4 */
23758 t3
= newTemp(Ity_I64
);
23759 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x3)));
23761 #if defined (_MIPSEL)
23762 /* Calculate X_byte position. */
23763 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23767 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
23771 /* Store X_byte on the right place. */
23772 store(mkexpr(t2
), mkexpr(H_byte
));
23773 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23774 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23775 store(mkexpr(t1
), mkexpr(E_byte
));
23777 #else /* _MIPSEB */
23778 /* Calculate X_byte position. */
23779 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
23783 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23787 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(3)), mkexpr(H_byte
));
23788 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23789 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23790 store(mkexpr(t1
), mkexpr(E_byte
));
23794 IRTemp E_byte
= newTemp(Ity_I8
);
23795 IRTemp F_byte
= newTemp(Ity_I8
);
23796 IRTemp G_byte
= newTemp(Ity_I8
);
23797 IRTemp H_byte
= newTemp(Ity_I8
);
23798 IRTemp F_pos
= newTemp(Ity_I32
);
23799 IRTemp G_pos
= newTemp(Ity_I32
);
23802 assign(H_byte
, getByteFromReg(rt
, 0));
23804 assign(G_byte
, getByteFromReg(rt
, 1));
23806 assign(F_byte
, getByteFromReg(rt
, 2));
23808 assign(E_byte
, getByteFromReg(rt
, 3));
23811 t1
= newTemp(Ity_I32
);
23812 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
23814 /* t2 = word addr */
23815 t2
= newTemp(Ity_I32
);
23816 assign(t2
, binop(Iop_And32
, mkexpr(t1
), mkU32(0xFFFFFFFCULL
)));
23818 /* t3 = addr mod 4 */
23819 t3
= newTemp(Ity_I32
);
23820 assign(t3
, binop(Iop_And32
, mkexpr(t1
), mkU32(0x3)));
23822 #if defined (_MIPSEL)
23823 /* Calculate X_byte position. */
23824 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
23828 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
23832 /* Store X_byte on the right place. */
23833 store(mkexpr(t2
), mkexpr(H_byte
));
23834 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23835 store(binop(Iop_Sub32
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23836 store(mkexpr(t1
), mkexpr(E_byte
));
23838 #else /* _MIPSEB */
23839 /* Calculate X_byte position. */
23840 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
23844 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
23848 store(binop(Iop_Add32
, mkexpr(t2
), mkU32(3)), mkexpr(H_byte
));
23849 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23850 store(binop(Iop_Add32
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23851 store(mkexpr(t1
), mkexpr(E_byte
));
23858 case 0x0B: /* SW */
23859 DIP("sw r%u, %u(r%u)", rt
, imm
, rs
);
23860 LOAD_STORE_PATTERN
;
23861 store(mkexpr(t1
), mkNarrowTo32(ty
, getIReg(rt
)));
23864 case 0x0C: { /* SDL rt, offset(base) MIPS64 */
23865 DIP("sdl r%u, %u(r%u)", rt
, imm
, rs
);
23867 IRTemp A_byte
= newTemp(Ity_I8
);
23868 IRTemp B_byte
= newTemp(Ity_I8
);
23869 IRTemp C_byte
= newTemp(Ity_I8
);
23870 IRTemp D_byte
= newTemp(Ity_I8
);
23871 IRTemp E_byte
= newTemp(Ity_I8
);
23872 IRTemp F_byte
= newTemp(Ity_I8
);
23873 IRTemp G_byte
= newTemp(Ity_I8
);
23874 IRTemp H_byte
= newTemp(Ity_I8
);
23875 IRTemp B_pos
= newTemp(Ity_I64
);
23876 IRTemp C_pos
= newTemp(Ity_I64
);
23877 IRTemp D_pos
= newTemp(Ity_I64
);
23878 IRTemp E_pos
= newTemp(Ity_I64
);
23879 IRTemp F_pos
= newTemp(Ity_I64
);
23880 IRTemp G_pos
= newTemp(Ity_I64
);
23883 assign(H_byte
, getByteFromReg(rt
, 0));
23885 assign(G_byte
, getByteFromReg(rt
, 1));
23887 assign(F_byte
, getByteFromReg(rt
, 2));
23889 assign(E_byte
, getByteFromReg(rt
, 3));
23891 assign(D_byte
, getByteFromReg(rt
, 4));
23893 assign(C_byte
, getByteFromReg(rt
, 5));
23895 assign(B_byte
, getByteFromReg(rt
, 6));
23897 assign(A_byte
, getByteFromReg(rt
, 7));
23900 t1
= newTemp(Ity_I64
);
23901 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
23903 /* t2 = word addr */
23904 t2
= newTemp(Ity_I64
);
23905 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFF8ULL
)));
23907 /* t3 = addr mod 7 */
23908 t3
= newTemp(Ity_I64
);
23909 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x7)));
23911 #if defined (_MIPSEL)
23912 /* Calculate X_byte position. */
23913 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x1)),
23917 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x2)),
23921 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x3)),
23925 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x4)),
23929 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x5)),
23933 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
23937 /* Store X_byte on the right place. */
23938 store(mkexpr(t2
), mkexpr(H_byte
));
23939 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23940 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23941 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
23942 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
23943 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
23944 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
23945 store(mkexpr(t1
), mkexpr(A_byte
));
23947 #else /* _MIPSEB */
23948 /* Calculate X_byte position. */
23949 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
23953 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x6)),
23957 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x5)),
23961 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x4)),
23965 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkexpr(t3
), mkU64(0x3)),
23969 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
23973 /* Store X_byte on the right place. */
23974 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x7)), mkexpr(H_byte
));
23975 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(G_pos
)), mkexpr(G_byte
));
23976 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
23977 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
23978 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
23979 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
23980 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
23981 store(mkexpr(t1
), mkexpr(A_byte
));
23988 /* SDR rt, offset(base) - MIPS64 */
23990 DIP("sdr r%u, %u(r%u)", rt
, imm
, rs
);
23991 IRTemp A_byte
= newTemp(Ity_I8
);
23992 IRTemp B_byte
= newTemp(Ity_I8
);
23993 IRTemp C_byte
= newTemp(Ity_I8
);
23994 IRTemp D_byte
= newTemp(Ity_I8
);
23995 IRTemp E_byte
= newTemp(Ity_I8
);
23996 IRTemp F_byte
= newTemp(Ity_I8
);
23997 IRTemp G_byte
= newTemp(Ity_I8
);
23998 IRTemp H_byte
= newTemp(Ity_I8
);
23999 IRTemp B_pos
= newTemp(Ity_I64
);
24000 IRTemp C_pos
= newTemp(Ity_I64
);
24001 IRTemp D_pos
= newTemp(Ity_I64
);
24002 IRTemp E_pos
= newTemp(Ity_I64
);
24003 IRTemp F_pos
= newTemp(Ity_I64
);
24004 IRTemp G_pos
= newTemp(Ity_I64
);
24007 assign(H_byte
, getByteFromReg(rt
, 0));
24009 assign(G_byte
, getByteFromReg(rt
, 1));
24011 assign(F_byte
, getByteFromReg(rt
, 2));
24013 assign(E_byte
, getByteFromReg(rt
, 3));
24015 assign(D_byte
, getByteFromReg(rt
, 4));
24017 assign(C_byte
, getByteFromReg(rt
, 5));
24019 assign(B_byte
, getByteFromReg(rt
, 6));
24021 assign(A_byte
, getByteFromReg(rt
, 7));
24024 t1
= newTemp(Ity_I64
);
24025 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
24027 /* t2 = word addr */
24028 t2
= newTemp(Ity_I64
);
24029 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFF8ULL
)));
24031 /* t3 = addr mod 7 */
24032 t3
= newTemp(Ity_I64
);
24033 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x7)));
24035 #if defined (_MIPSEL)
24036 /* Calculate X_byte position. */
24037 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x1), mkexpr(t3
)),
24041 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x2), mkexpr(t3
)),
24045 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x3), mkexpr(t3
)),
24049 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x4), mkexpr(t3
)),
24053 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x5), mkexpr(t3
)),
24057 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x7)),
24061 /* Store X_byte on the right place. */
24062 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x7)), mkexpr(A_byte
));
24063 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
24064 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
24065 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
24066 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
24067 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
24068 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24069 store(mkexpr(t1
), mkexpr(H_byte
));
24071 #else /* _MIPSEB */
24072 /* Calculate X_byte position. */
24073 assign(B_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x5), mkexpr(t3
)),
24077 assign(C_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x4), mkexpr(t3
)),
24081 assign(D_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x3), mkexpr(t3
)),
24085 assign(E_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x2), mkexpr(t3
)),
24089 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpLT64U
, mkU64(0x1), mkexpr(t3
)),
24093 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24097 /* Store X_byte on the right place. */
24098 store(mkexpr(t2
), mkexpr(A_byte
));
24099 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(B_pos
)), mkexpr(B_byte
));
24100 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(C_pos
)), mkexpr(C_byte
));
24101 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(D_pos
)), mkexpr(D_byte
));
24102 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(E_pos
)), mkexpr(E_byte
));
24103 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(F_pos
)), mkexpr(F_byte
));
24104 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24105 store(mkexpr(t1
), mkexpr(H_byte
));
24110 case 0x0E: /* SWR */
24111 DIP("swr r%u, %u(r%u)", rt
, imm
, rs
);
24114 IRTemp E_byte
= newTemp(Ity_I8
);
24115 IRTemp F_byte
= newTemp(Ity_I8
);
24116 IRTemp G_byte
= newTemp(Ity_I8
);
24117 IRTemp H_byte
= newTemp(Ity_I8
);
24118 IRTemp F_pos
= newTemp(Ity_I64
);
24119 IRTemp G_pos
= newTemp(Ity_I64
);
24122 assign(H_byte
, getByteFromReg(rt
, 0));
24124 assign(G_byte
, getByteFromReg(rt
, 1));
24126 assign(F_byte
, getByteFromReg(rt
, 2));
24128 assign(E_byte
, getByteFromReg(rt
, 3));
24131 t1
= newTemp(Ity_I64
);
24132 assign(t1
, binop(Iop_Add64
, getIReg(rs
), mkU64(extend_s_16to64(imm
))));
24134 /* t2 = word addr */
24135 t2
= newTemp(Ity_I64
);
24136 assign(t2
, binop(Iop_And64
, mkexpr(t1
), mkU64(0xFFFFFFFFFFFFFFFCULL
)));
24138 /* t3 = addr mod 4 */
24139 t3
= newTemp(Ity_I64
);
24140 assign(t3
, binop(Iop_And64
, mkexpr(t1
), mkU64(0x3)));
24142 #if defined (_MIPSEL)
24143 /* Calculate X_byte position. */
24144 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24148 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
24152 /* Store X_byte on the right place. */
24153 store(binop(Iop_Add64
, mkexpr(t2
), mkU64(0x3)), mkexpr(E_byte
));
24154 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24155 store(binop(Iop_Add64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24156 store(mkexpr(t1
), mkexpr(H_byte
));
24158 #else /* _MIPSEB */
24159 /* Calculate X_byte position. */
24160 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x3)),
24164 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ64
, mkexpr(t3
), mkU64(0x0)),
24168 /* Store X_byte on the right place. */
24169 store(mkexpr(t2
), mkexpr(E_byte
));
24170 store(binop(Iop_Add64
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24171 store(binop(Iop_Sub64
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24172 store(mkexpr(t1
), mkexpr(H_byte
));
24175 IRTemp E_byte
= newTemp(Ity_I8
);
24176 IRTemp F_byte
= newTemp(Ity_I8
);
24177 IRTemp G_byte
= newTemp(Ity_I8
);
24178 IRTemp H_byte
= newTemp(Ity_I8
);
24179 IRTemp F_pos
= newTemp(Ity_I32
);
24180 IRTemp G_pos
= newTemp(Ity_I32
);
24183 assign(H_byte
, getByteFromReg(rt
, 0));
24185 assign(G_byte
, getByteFromReg(rt
, 1));
24187 assign(F_byte
, getByteFromReg(rt
, 2));
24189 assign(E_byte
, getByteFromReg(rt
, 3));
24192 t1
= newTemp(Ity_I32
);
24193 assign(t1
, binop(Iop_Add32
, getIReg(rs
), mkU32(extend_s_16to32(imm
))));
24195 /* t2 = word addr */
24196 t2
= newTemp(Ity_I32
);
24197 assign(t2
, binop(Iop_And32
, mkexpr(t1
), mkU32(0xFFFFFFFCULL
)));
24199 /* t3 = addr mod 4 */
24200 t3
= newTemp(Ity_I32
);
24201 assign(t3
, binop(Iop_And32
, mkexpr(t1
), mkU32(0x3)));
24203 #if defined (_MIPSEL)
24204 /* Calculate X_byte position. */
24205 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
24209 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
24213 /* Store X_byte on the right place. */
24214 store(binop(Iop_Add32
, mkexpr(t2
), mkU32(0x3)), mkexpr(E_byte
));
24215 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24216 store(binop(Iop_Add32
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24217 store(mkexpr(t1
), mkexpr(H_byte
));
24219 #else /* _MIPSEB */
24220 /* Calculate X_byte position. */
24221 assign(F_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x3)),
24225 assign(G_pos
, IRExpr_ITE(binop(Iop_CmpEQ32
, mkexpr(t3
), mkU32(0x0)),
24229 /* Store X_byte on the right place. */
24230 store(mkexpr(t2
), mkexpr(E_byte
));
24231 store(binop(Iop_Add32
, mkexpr(t2
), mkexpr(F_pos
)), mkexpr(F_byte
));
24232 store(binop(Iop_Sub32
, mkexpr(t1
), mkexpr(G_pos
)), mkexpr(G_byte
));
24233 store(mkexpr(t1
), mkexpr(H_byte
));
24243 static UInt
disInstr_MIPS_WRK_30(UInt cins
, const VexArchInfo
* archinfo
,
24244 const VexAbiInfo
* abiinfo
, DisResult
* dres
,
24247 IRTemp t0
, t1
= 0, t2
, t3
, t4
, t5
;
24248 UInt opcode
, rs
, rt
, rd
, ft
, function
, imm
, instr_index
;
24250 opcode
= get_opcode(cins
);
24251 imm
= get_imm(cins
);
24257 instr_index
= get_instr_index(cins
);
24258 function
= get_function(cins
);
24259 IRType ty
= mode64
? Ity_I64
: Ity_I32
;
24261 switch (opcode
& 0x0F) {
24262 case 0x00: /* LL */
24263 DIP("ll r%u, %u(r%u)", rt
, imm
, rs
);
24264 LOAD_STORE_PATTERN
;
24266 if (abiinfo
->guest__use_fallback_LLSC
) {
24268 assign(t2
, mkWidenFrom32(ty
, load(Ity_I32
, mkexpr(t1
)), True
));
24269 putLLaddr(mkexpr(t1
));
24270 putLLdata(mkexpr(t2
));
24271 putIReg(rt
, mkexpr(t2
));
24273 t2
= newTemp(Ity_I32
);
24274 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), NULL
));
24275 putIReg(rt
, mkWidenFrom32(ty
, mkexpr(t2
), True
));
24280 case 0x01: /* LWC1 */
24281 /* Load Word to Floating Point - LWC1 (MIPS32) */
24282 DIP("lwc1 f%u, %u(r%u)", ft
, imm
, rs
);
24283 LOAD_STORE_PATTERN
;
24286 t0
= newTemp(Ity_F32
);
24287 t2
= newTemp(Ity_I64
);
24288 assign(t0
, load(Ity_F32
, mkexpr(t1
)));
24289 assign(t2
, mkWidenFrom32(Ity_I64
, unop(Iop_ReinterpF32asI32
,
24290 mkexpr(t0
)), True
));
24291 putDReg(ft
, unop(Iop_ReinterpI64asF64
, mkexpr(t2
)));
24293 putFReg(ft
, load(Ity_F32
, mkexpr(t1
)));
24298 case 0x02: /* Branch on Bit Clear - BBIT0; Cavium OCTEON */
24300 /* Cavium Specific instructions. */
24301 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24302 DIP("bbit0 r%u, 0x%x, %x", rs
, rt
, imm
);
24303 t0
= newTemp(Ity_I32
);
24304 t1
= newTemp(Ity_I32
);
24305 assign(t0
, mkU32(0x1));
24306 assign(t1
, binop(Iop_Shl32
, mkexpr(t0
), mkU8(rt
)));
24307 dis_branch(False
, binop(Iop_CmpEQ32
,
24310 mkNarrowTo32(ty
, getIReg(rs
))),
24313 } else if (archinfo
->hwcaps
& VEX_MIPS_CPU_ISA_M32R6
) { /* BC */
24314 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24315 DIP("bc %x", instr_index
& 0x3FFFFFF);
24318 t0
= newTemp(Ity_I64
);
24319 assign(t0
, mkU64(guest_PC_curr_instr
+
24320 ((extend_s_26to64(instr_index
& 0x3FFFFFF) + 1 ) << 2)));
24322 t0
= newTemp(Ity_I32
);
24323 assign(t0
, mkU32(guest_PC_curr_instr
+
24324 ((extend_s_26to32(instr_index
& 0x3FFFFFF) + 1) << 2)));
24328 dres
->whatNext
= Dis_StopHere
;
24329 dres
->jk_StopHere
= Ijk_Boring
;
24331 ILLEGAL_INSTRUCTON
;
24340 case 0x03: /* PREF */
24344 case 0x04: /* Load Linked Doubleword - LLD; MIPS64 */
24345 DIP("lld r%u, %u(r%u)", rt
, imm
, rs
);
24348 LOAD_STORE_PATTERN
;
24349 t2
= newTemp(Ity_I64
);
24351 if (abiinfo
->guest__use_fallback_LLSC
) {
24352 assign(t2
, load(Ity_I64
, mkexpr(t1
)));
24353 putLLaddr(mkexpr(t1
));
24354 putLLdata(mkexpr(t2
));
24356 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), NULL
));
24359 putIReg(rt
, mkexpr(t2
));
24366 case 0x05: /* Load Doubleword to Floating Point - LDC1 (MIPS32) */
24367 DIP("ldc1 f%u, %u(%u)", rt
, imm
, rs
);
24368 LOAD_STORE_PATTERN
;
24369 putDReg(ft
, load(Ity_F64
, mkexpr(t1
)));
24372 case 0x06: /* Branch on Bit Clear Plus 32 - BBIT032; Cavium OCTEON */
24374 /* Cavium Specific instructions. */
24375 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24376 DIP("bbit032 r%u, 0x%x, %x", rs
, rt
, imm
);
24377 t0
= newTemp(Ity_I64
);
24378 t1
= newTemp(Ity_I8
); /* Shift. */
24379 t2
= newTemp(Ity_I64
);
24380 assign(t0
, mkU64(0x1));
24381 assign(t1
, binop(Iop_Add8
, mkU8(rt
), mkU8(32)));
24382 assign(t2
, binop(Iop_Shl64
, mkexpr(t0
), mkexpr(t1
)));
24383 dis_branch(False
, binop(Iop_CmpEQ64
,
24389 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24390 if (rs
== 0) { /* JIC */
24391 DIP("jic r%u, %u", rt
, instr_index
& 0xFFFF);
24394 t0
= newTemp(Ity_I64
);
24395 assign(t0
, binop(Iop_Add64
, getIReg(rt
),
24396 mkU64(extend_s_16to64((instr_index
& 0xFFFF)))));
24398 t0
= newTemp(Ity_I32
);
24399 assign(t0
, binop(Iop_Add32
, getIReg(rt
),
24400 mkU32(extend_s_16to32((instr_index
& 0xFFFF)))));
24404 dres
->whatNext
= Dis_StopHere
;
24405 dres
->jk_StopHere
= Ijk_Boring
;
24406 } else { /* BEQZC */
24407 DIP("beqzc r%u, %u", rs
, imm
);
24408 dres
->jk_StopHere
= Ijk_Boring
;
24409 dres
->whatNext
= Dis_StopHere
;
24410 ULong branch_offset
;
24411 t0
= newTemp(Ity_I1
);
24414 branch_offset
= extend_s_23to64((instr_index
& 0x1fffff) << 2);
24415 assign(t0
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0x0)));
24416 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24417 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
24419 putPC(mkU64(guest_PC_curr_instr
+ 4));
24421 branch_offset
= extend_s_23to32((instr_index
& 0x1fffff) << 2);
24422 assign(t0
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0x0)));
24423 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24424 IRConst_U32(guest_PC_curr_instr
+ 4 +
24425 (UInt
) branch_offset
), OFFB_PC
));
24426 putPC(mkU32(guest_PC_curr_instr
+ 4));
24435 case 0x07: /* Load Doubleword - LD; MIPS64 */
24436 DIP("ld r%u, %u(r%u)", rt
, imm
, rs
);
24437 LOAD_STORE_PATTERN
;
24438 putIReg(rt
, load(Ity_I64
, mkexpr(t1
)));
24441 case 0x08: /* SC */
24442 DIP("sc r%u, %u(r%u)", rt
, imm
, rs
);
24443 t2
= newTemp(Ity_I1
);
24444 LOAD_STORE_PATTERN
;
24446 if (abiinfo
->guest__use_fallback_LLSC
) {
24447 t3
= newTemp(Ity_I32
);
24448 assign(t2
, binop(mode64
? Iop_CmpNE64
: Iop_CmpNE32
,
24449 mkexpr(t1
), getLLaddr()));
24450 assign(t3
, mkNarrowTo32(ty
, getIReg(rt
)));
24451 putLLaddr(LLADDR_INVALID
);
24452 putIReg(rt
, getIReg(0));
24454 mips_next_insn_if(mkexpr(t2
));
24456 t4
= newTemp(Ity_I32
);
24457 t5
= newTemp(Ity_I32
);
24459 assign(t5
, mkNarrowTo32(ty
, getLLdata()));
24461 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
24462 MIPS_IEND
, mkexpr(t1
), /* addr */
24463 NULL
, mkexpr(t5
), /* expected value */
24464 NULL
, mkexpr(t3
) /* new value */)));
24466 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
,
24467 binop(Iop_CmpEQ32
, mkexpr(t4
), mkexpr(t5
))));
24469 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
),
24470 mkNarrowTo32(ty
, getIReg(rt
))));
24471 putIReg(rt
, unop(mode64
? Iop_1Uto64
: Iop_1Uto32
, mkexpr(t2
)));
24476 case 0x09: /* SWC1 */
24477 DIP("swc1 f%u, %u(r%u)", ft
, imm
, rs
);
24480 t0
= newTemp(Ity_I64
);
24481 t2
= newTemp(Ity_I32
);
24482 LOAD_STORE_PATTERN
;
24483 assign(t0
, unop(Iop_ReinterpF64asI64
, getFReg(ft
)));
24484 assign(t2
, unop(Iop_64to32
, mkexpr(t0
)));
24485 store(mkexpr(t1
), unop(Iop_ReinterpI32asF32
, mkexpr(t2
)));
24487 LOAD_STORE_PATTERN
;
24488 store(mkexpr(t1
), getFReg(ft
));
24493 case 0x0A: /* Branch on Bit Set - BBIT1; Cavium OCTEON */
24495 /* Cavium Specific instructions. */
24496 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24497 DIP("bbit1 r%u, 0x%x, %x", rs
, rt
, imm
);
24498 t0
= newTemp(Ity_I32
);
24499 t1
= newTemp(Ity_I32
);
24500 assign(t0
, mkU32(0x1));
24501 assign(t1
, binop(Iop_Shl32
, mkexpr(t0
), mkU8(rt
)));
24502 dis_branch(False
, binop(Iop_CmpNE32
,
24505 mkNarrowTo32(ty
, getIReg(rs
))),
24508 } else if (archinfo
->hwcaps
& VEX_MIPS_CPU_ISA_M32R6
) {/* BALC */
24509 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24510 DIP("balc %x", instr_index
& 0x3FFFFFF);
24513 t0
= newTemp(Ity_I64
);
24514 assign(t0
, mkU64(guest_PC_curr_instr
+ ((extend_s_26to64(
24515 instr_index
& 0x3FFFFFF) + 1) << 2)));
24516 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
24518 t0
= newTemp(Ity_I32
);
24519 assign(t0
, mkU32(guest_PC_curr_instr
+ ((extend_s_26to32(
24520 instr_index
& 0x3FFFFFF) + 1) << 2)));
24521 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
24525 dres
->whatNext
= Dis_StopHere
;
24526 dres
->jk_StopHere
= Ijk_Call
;
24528 ILLEGAL_INSTRUCTON
;
24537 case 0x0B: /* PCREL */
24538 if (rt
== 0x1E) { /* AUIPC */
24539 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24540 DIP("auipc r%u, %u", rs
, imm
);
24543 putIReg(rs
, mkU64(guest_PC_curr_instr
+ (imm
<< 16)));
24545 putIReg(rs
, mkU32(guest_PC_curr_instr
+ (imm
<< 16)));
24548 ILLEGAL_INSTRUCTON
;
24552 } else if (rt
== 0x1F) { /* ALUIPC */
24553 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24554 DIP("aluipc r%u, %u", rs
, imm
);
24557 putIReg(rs
, mkU64((~0x0FFFFULL
) &
24558 (guest_PC_curr_instr
+ extend_s_32to64(imm
<< 16))));
24560 putIReg(rs
, mkU32((~0x0FFFFULL
) &
24561 (guest_PC_curr_instr
+ (imm
<< 16))));
24564 ILLEGAL_INSTRUCTON
;
24568 } else if ((rt
& 0x18) == 0) { /* ADDIUPC */
24569 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24570 DIP("addiupc r%u, %u", rs
, instr_index
& 0x7FFFF);
24573 putIReg(rs
, mkU64(guest_PC_curr_instr
+
24574 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24576 putIReg(rs
, mkU32(guest_PC_curr_instr
+
24577 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24580 ILLEGAL_INSTRUCTON
;
24584 } else if ((rt
& 0x18) == 8) { /* LWPC */
24585 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24586 DIP("lwpc r%u, %x", rs
, instr_index
& 0x7FFFF);
24589 t1
= newTemp(Ity_I64
);
24590 assign(t1
, mkU64(guest_PC_curr_instr
+
24591 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24592 putIReg(rs
, unop(Iop_32Sto64
, load(Ity_I32
, mkexpr(t1
))));
24594 t1
= newTemp(Ity_I32
);
24595 assign(t1
, mkU32(guest_PC_curr_instr
+
24596 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24597 putIReg(rs
, load(Ity_I32
, mkexpr(t1
)));
24600 ILLEGAL_INSTRUCTON
;
24604 } else if ((rt
& 0x18) == 16) { /* LWUPC */
24605 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24606 DIP("lwupc r%u, %x", rs
, instr_index
& 0x7FFFF);
24609 t1
= newTemp(Ity_I64
);
24610 assign(t1
, mkU64(guest_PC_curr_instr
+
24611 (extend_s_19to64(instr_index
& 0x7FFFF) << 2)));
24612 putIReg(rs
, unop(Iop_32Uto64
, load(Ity_I32
, mkexpr(t1
))));
24614 t1
= newTemp(Ity_I32
);
24615 assign(t1
, mkU32(guest_PC_curr_instr
+
24616 (extend_s_19to32(instr_index
& 0x7FFFF) << 2)));
24617 putIReg(rs
, load(Ity_I32
, mkexpr(t1
)));
24624 } else if ((rt
& 0x1C) == 0x18) { /* LDPC */
24625 if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24626 DIP("ldpc r%u, %x", rs
, instr_index
& 0x3FFFF);
24627 t1
= newTemp(Ity_I64
);
24628 assign(t1
, mkU64(guest_PC_curr_instr
+
24629 (extend_s_18to64(instr_index
& 0x3FFFF) << 3)));
24630 putIReg(rs
, load(Ity_I64
, mkexpr(t1
)));
24640 if (0x3B == function
&&
24641 (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_BROADCOM
)) {
24643 DIP("rdhwr r%u, r%u", rt
, rd
);
24646 putIReg(rt
, getULR());
24655 case 0x0C: /* Store Conditional Doubleword - SCD; MIPS64 */
24656 DIP("scd r%u, %u(r%u)", rt
, imm
, rs
);
24659 t2
= newTemp(Ity_I1
);
24660 LOAD_STORE_PATTERN
;
24662 if (abiinfo
->guest__use_fallback_LLSC
) {
24663 t3
= newTemp(Ity_I64
);
24664 assign(t2
, binop(Iop_CmpNE64
, mkexpr(t1
), getLLaddr()));
24665 assign(t3
, getIReg(rt
));
24666 putLLaddr(LLADDR_INVALID
);
24667 putIReg(rt
, getIReg(0));
24669 mips_next_insn_if(mkexpr(t2
));
24671 t4
= newTemp(Ity_I64
);
24672 t5
= newTemp(Ity_I64
);
24674 assign(t5
, getLLdata());
24676 stmt(IRStmt_CAS(mkIRCAS(IRTemp_INVALID
, t4
, /* old_mem */
24677 MIPS_IEND
, mkexpr(t1
), /* addr */
24678 NULL
, mkexpr(t5
), /* expected value */
24679 NULL
, mkexpr(t3
) /* new value */)));
24681 putIReg(rt
, unop(Iop_1Uto64
,
24682 binop(Iop_CmpEQ64
, mkexpr(t4
), mkexpr(t5
))));
24684 stmt(IRStmt_LLSC(MIPS_IEND
, t2
, mkexpr(t1
), getIReg(rt
)));
24685 putIReg(rt
, unop(Iop_1Uto64
, mkexpr(t2
)));
24693 case 0x0D: /* Store Doubleword from Floating Point - SDC1 */
24694 DIP("sdc1 f%u, %u(%u)", ft
, imm
, rs
);
24695 LOAD_STORE_PATTERN
;
24696 store(mkexpr(t1
), getDReg(ft
));
24699 case 0x0E: /* Branch on Bit Set Plus 32 - BBIT132; Cavium OCTEON */
24701 /* Cavium Specific instructions. */
24702 if (VEX_MIPS_COMP_ID(archinfo
->hwcaps
) == VEX_PRID_COMP_CAVIUM
) {
24703 DIP("bbit132 r%u, 0x%x, %x", rs
, rt
, imm
);
24704 t0
= newTemp(Ity_I64
);
24705 t1
= newTemp(Ity_I8
); /* Shift. */
24706 t2
= newTemp(Ity_I64
);
24707 assign(t0
, mkU64(0x1));
24708 assign(t1
, binop(Iop_Add8
, mkU8(rt
), mkU8(32)));
24709 assign(t2
, binop(Iop_Shl64
, mkexpr(t0
), mkexpr(t1
)));
24710 dis_branch(False
, binop(Iop_CmpNE64
,
24716 } else if (VEX_MIPS_CPU_HAS_MIPSR6(archinfo
->hwcaps
)) {
24717 if (rs
== 0) {/* JIALC */
24718 DIP("jialc r%u, %u", rt
, instr_index
& 0xFFFF);
24723 t0
= newTemp(Ity_I64
);
24724 assign(t0
, binop(Iop_Add64
, getIReg(rt
),
24725 mkU64(extend_s_16to64((instr_index
& 0xFFFF)))));
24726 putIReg(31, mkU64(guest_PC_curr_instr
+ 4));
24728 t0
= newTemp(Ity_I32
);
24729 assign(t0
, binop(Iop_Add32
, getIReg(rt
),
24730 mkU32(extend_s_16to32((instr_index
& 0xFFFF)))));
24731 putIReg(31, mkU32(guest_PC_curr_instr
+ 4));
24735 dres
->whatNext
= Dis_StopHere
;
24736 dres
->jk_StopHere
= Ijk_Call
;
24737 } else { /* BNEZC */
24738 DIP("bnezc r%u, %u", rs
, imm
);
24739 dres
->jk_StopHere
= Ijk_Boring
;
24740 dres
->whatNext
= Dis_StopHere
;
24741 ULong branch_offset
;
24742 t0
= newTemp(Ity_I1
);
24745 branch_offset
= extend_s_23to64((instr_index
& 0x1fffff) << 2);
24746 assign(t0
, unop(Iop_Not1
, binop(Iop_CmpEQ64
, getIReg(rs
), mkU64(0x0))));
24747 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24748 IRConst_U64(guest_PC_curr_instr
+ 4 + branch_offset
),
24750 putPC(mkU64(guest_PC_curr_instr
+ 4));
24752 branch_offset
= extend_s_23to32((instr_index
& 0x1fffff) << 2);
24753 assign(t0
, unop(Iop_Not1
, binop(Iop_CmpEQ32
, getIReg(rs
), mkU32(0x0))));
24754 stmt(IRStmt_Exit(mkexpr(t0
), Ijk_Boring
,
24755 IRConst_U32(guest_PC_curr_instr
+ 4 +
24756 (UInt
) branch_offset
), OFFB_PC
));
24757 putPC(mkU32(guest_PC_curr_instr
+ 4));
24766 case 0x0F: /* Store Doubleword - SD; MIPS64 */
24767 DIP("sd r%u, %u(r%u)", rt
, imm
, rs
);
24768 LOAD_STORE_PATTERN
;
24769 store(mkexpr(t1
), getIReg(rt
));
24779 static DisResult
disInstr_MIPS_WRK ( Long delta64
,
24780 const VexArchInfo
* archinfo
,
24781 const VexAbiInfo
* abiinfo
,
24785 UInt opcode
, cins
, result
;
24789 static IRExpr
*lastn
= NULL
; /* last jump addr */
24790 static IRStmt
*bstmt
= NULL
; /* branch (Exit) stmt */
24792 /* The running delta */
24793 Int delta
= (Int
) delta64
;
24795 /* Holds eip at the start of the insn, so that we can print
24796 consistent error messages for unimplemented insns. */
24797 Int delta_start
= delta
;
24799 /* Are we in a delay slot ? */
24800 Bool delay_slot_branch
, likely_delay_slot
, delay_slot_jump
;
24802 /* Set result defaults. */
24803 dres
.whatNext
= Dis_Continue
;
24805 dres
.jk_StopHere
= Ijk_INVALID
;
24806 dres
.hint
= Dis_HintNone
;
24808 delay_slot_branch
= likely_delay_slot
= delay_slot_jump
= False
;
24810 const UChar
*code
= guest_code
+ delta
;
24811 cins
= getUInt(code
);
24812 opcode
= get_opcode(cins
);
24813 DIP("\t0x%llx:\t0x%08x\t", (Addr64
)guest_PC_curr_instr
, cins
);
24816 if (branch_or_jump(guest_code
+ delta
- 4)
24817 && (lastn
!= NULL
|| bstmt
!= NULL
)) {
24818 dres
.whatNext
= Dis_StopHere
;
24819 delay_slot_jump
= (lastn
!= NULL
);
24820 delay_slot_branch
= (bstmt
!= NULL
);
24823 likely_delay_slot
= (lastn
!= NULL
)
24824 && branch_or_link_likely(guest_code
+ delta
- 4);
24827 // Emit an Illegal instruction in case a branch/jump
24828 // instruction is encountered in the delay slot
24829 // of an another branch/jump
24830 if ((delay_slot_branch
|| likely_delay_slot
|| delay_slot_jump
) &&
24831 (branch_or_jump(guest_code
+ delta
) ||
24832 branch_or_link_likely(guest_code
+ delta
))) {
24834 putPC(mkU64(guest_PC_curr_instr
+ 4));
24836 putPC(mkU32(guest_PC_curr_instr
+ 4));
24838 dres
.jk_StopHere
= Ijk_SigILL
;
24839 dres
.whatNext
= Dis_StopHere
;
24845 /* Spot "Special" instructions (see comment at top of file). */
24847 /* Spot the 16-byte preamble:
24860 UInt word1
= mode64
? 0xF8 : 0x342;
24861 UInt word2
= mode64
? 0x378 : 0x742;
24862 UInt word3
= mode64
? 0x778 : 0xC2;
24863 UInt word4
= mode64
? 0x4F8 : 0x4C2;
24865 if (getUInt(code
+ 0) == word1
&& getUInt(code
+ 4) == word2
&&
24866 getUInt(code
+ 8) == word3
&& getUInt(code
+ 12) == word4
) {
24867 /* Got a "Special" instruction preamble. Which one is it? */
24868 if (getUInt(code
+ 16) == 0x01ad6825 /* or $13, $13, $13 */ ) {
24869 /* $11 = client_request ( $12 ) */
24870 DIP("$11 = client_request ( $12 )");
24873 putPC(mkU64(guest_PC_curr_instr
+ 20));
24875 putPC(mkU32(guest_PC_curr_instr
+ 20));
24877 dres
.jk_StopHere
= Ijk_ClientReq
;
24878 dres
.whatNext
= Dis_StopHere
;
24880 goto decode_success
;
24881 } else if (getUInt(code
+ 16) == 0x01ce7025 /* or $14, $14, $14 */ ) {
24882 /* $11 = guest_NRADDR */
24883 DIP("$11 = guest_NRADDR");
24888 putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS64State
,
24889 guest_NRADDR
), Ity_I64
));
24891 putIReg(11, IRExpr_Get(offsetof(VexGuestMIPS32State
,
24892 guest_NRADDR
), Ity_I32
));
24894 goto decode_success
;
24895 } else if (getUInt(code
+ 16) == 0x01ef7825 /* or $15, $15, $15 */ ) {
24896 /* branch-and-link-to-noredir $25 */
24897 DIP("branch-and-link-to-noredir $25");
24900 putIReg(31, mkU64(guest_PC_curr_instr
+ 20));
24902 putIReg(31, mkU32(guest_PC_curr_instr
+ 20));
24904 putPC(getIReg(25));
24905 dres
.jk_StopHere
= Ijk_NoRedir
;
24906 dres
.whatNext
= Dis_StopHere
;
24907 goto decode_success
;
24908 } else if (getUInt(code
+ 16) == 0x016b5825 /* or $11,$11,$11 */ ) {
24910 DIP("IR injection");
24911 #if defined (_MIPSEL)
24912 vex_inject_ir(irsb
, Iend_LE
);
24913 #elif defined (_MIPSEB)
24914 vex_inject_ir(irsb
, Iend_BE
);
24918 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_CMSTART
),
24919 mkU64(guest_PC_curr_instr
)));
24920 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_CMLEN
),
24923 putPC(mkU64(guest_PC_curr_instr
+ 20));
24925 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_CMSTART
),
24926 mkU32(guest_PC_curr_instr
)));
24927 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_CMLEN
),
24930 putPC(mkU32(guest_PC_curr_instr
+ 20));
24933 dres
.whatNext
= Dis_StopHere
;
24934 dres
.jk_StopHere
= Ijk_InvalICache
;
24937 goto decode_success
;
24940 /* We don't know what it is. Set opc1/opc2 so decode_failure
24941 can print the insn following the Special-insn preamble. */
24943 goto decode_failure
;
24948 switch (opcode
& 0x30) {
24950 result
= disInstr_MIPS_WRK_00(cins
, archinfo
, abiinfo
,
24951 &dres
, &bstmt
, &lastn
);
24953 if (result
== -1) goto decode_failure
;
24955 if (result
== -2) goto decode_failure_dsp
;
24961 result
= disInstr_MIPS_WRK_10(cins
, archinfo
, abiinfo
,
24962 &dres
, &bstmt
, &lastn
);
24964 if (result
== -1) goto decode_failure
;
24966 if (result
== -2) goto decode_failure_dsp
;
24971 result
= disInstr_MIPS_WRK_20(cins
);
24973 if (result
== -1) goto decode_failure
;
24975 if (result
== -2) goto decode_failure_dsp
;
24980 result
= disInstr_MIPS_WRK_30(cins
, archinfo
, abiinfo
, &dres
, &bstmt
);
24982 if (result
== -1) goto decode_failure
;
24984 if (result
== -2) goto decode_failure_dsp
;
24988 decode_failure_dsp
:
24989 vex_printf("Error occured while trying to decode MIPS32 DSP "
24990 "instruction.\nYour platform probably doesn't support "
24991 "MIPS32 DSP ASE.\n");
24994 /* All decode failures end up here. */
24996 vex_printf("vex mips->IR: unhandled instruction bytes: "
24997 "0x%x 0x%x 0x%x 0x%x\n",
24998 (UInt
) getIByte(delta_start
+ 0),
24999 (UInt
) getIByte(delta_start
+ 1),
25000 (UInt
) getIByte(delta_start
+ 2),
25001 (UInt
) getIByte(delta_start
+ 3));
25003 /* Tell the dispatcher that this insn cannot be decoded, and so has
25004 not been executed, and (is currently) the next to be executed.
25005 EIP should be up-to-date since it made so at the start bnezof each
25006 insn, but nevertheless be paranoid and update it again right
25009 stmt(IRStmt_Put(offsetof(VexGuestMIPS64State
, guest_PC
),
25010 mkU64(guest_PC_curr_instr
)));
25011 jmp_lit64(&dres
, Ijk_NoDecode
, guest_PC_curr_instr
);
25013 stmt(IRStmt_Put(offsetof(VexGuestMIPS32State
, guest_PC
),
25014 mkU32(guest_PC_curr_instr
)));
25015 jmp_lit32(&dres
, Ijk_NoDecode
, guest_PC_curr_instr
);
25018 dres
.whatNext
= Dis_StopHere
;
25021 } /* switch (opc) for the main (primary) opcode switch. */
25023 /* All MIPS insn have 4 bytes */
25025 if (delay_slot_branch
) {
25026 delay_slot_branch
= False
;
25031 putPC(mkU64(guest_PC_curr_instr
+ 4));
25033 putPC(mkU32(guest_PC_curr_instr
+ 4));
25035 dres
.jk_StopHere
= is_Branch_or_Jump_and_Link(guest_code
+ delta
- 4) ?
25036 Ijk_Call
: Ijk_Boring
;
25039 if (likely_delay_slot
) {
25040 dres
.jk_StopHere
= Ijk_Boring
;
25041 dres
.whatNext
= Dis_StopHere
;
25046 if (delay_slot_jump
) {
25049 dres
.jk_StopHere
= is_Branch_or_Jump_and_Link(guest_code
+ delta
- 4) ?
25050 Ijk_Call
: Ijk_Boring
;
25058 /* All decode successes end up here. */
25059 switch (dres
.whatNext
) {
25061 if (branch_or_jump(guest_code
+ delta
) ||
25062 branch_or_link_likely(guest_code
+ delta
)) {
25063 guest_PC_curr_instr
+= 4;
25064 dres
= disInstr_MIPS_WRK(delta64
+ 4, archinfo
, abiinfo
, sigill_diag
);
25068 putPC(mkU64(guest_PC_curr_instr
+ 4));
25070 putPC(mkU32(guest_PC_curr_instr
+ 4));
25082 /* On MIPS we need to check if the last instruction in block is branch or
25084 if (((vex_control
.guest_max_insns
- 1) == (delta
+ 4) / 4)
25085 && (dres
.whatNext
!= Dis_StopHere
))
25086 if (branch_or_jump(guest_code
+ delta
+ 4)) {
25087 dres
.whatNext
= Dis_StopHere
;
25088 dres
.jk_StopHere
= Ijk_Boring
;
25090 putPC(mkU64(guest_PC_curr_instr
+ 4));
25092 putPC(mkU32(guest_PC_curr_instr
+ 4));
25099 /*------------------------------------------------------------*/
25100 /*--- Top-level fn ---*/
25101 /*------------------------------------------------------------*/
25103 /* Disassemble a single instruction into IR. The instruction
25104 is located in host memory at &guest_code[delta]. */
25105 DisResult
disInstr_MIPS( IRSB
* irsb_IN
,
25106 const UChar
* guest_code_IN
,
25109 VexArch guest_arch
,
25110 const VexArchInfo
* archinfo
,
25111 const VexAbiInfo
* abiinfo
,
25112 VexEndness host_endness_IN
,
25113 Bool sigill_diag_IN
)
25116 /* Set globals (see top of this file) */
25117 vassert(guest_arch
== VexArchMIPS32
|| guest_arch
== VexArchMIPS64
);
25119 mode64
= guest_arch
!= VexArchMIPS32
;
25120 fp_mode64
= abiinfo
->guest_mips_fp_mode
& 1;
25121 fp_mode64_fre
= abiinfo
->guest_mips_fp_mode
& 2;
25122 has_msa
= VEX_MIPS_PROC_MSA(archinfo
->hwcaps
);
25124 vassert(VEX_MIPS_HOST_FP_MODE(archinfo
->hwcaps
) >= fp_mode64
);
25126 guest_code
= guest_code_IN
;
25128 host_endness
= host_endness_IN
;
25129 #if defined(VGP_mips32_linux)
25130 guest_PC_curr_instr
= (Addr32
)guest_IP
;
25131 #elif defined(VGP_mips64_linux)
25132 guest_PC_curr_instr
= (Addr64
)guest_IP
;
25135 dres
= disInstr_MIPS_WRK(delta
, archinfo
, abiinfo
, sigill_diag_IN
);
25140 /*--------------------------------------------------------------------*/
25141 /*--- end guest_mips_toIR.c ---*/
25142 /*--------------------------------------------------------------------*/