2 /*---------------------------------------------------------------*/
3 /*--- begin host_mips_defs.c ---*/
4 /*---------------------------------------------------------------*/
7 This file is part of Valgrind, a dynamic binary instrumentation
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
28 #include "libvex_basictypes.h"
30 #include "libvex_trc_values.h"
32 #include "main_util.h"
33 #include "host_generic_regs.h"
34 #include "host_mips_defs.h"
36 /* Register number for guest state pointer in host code. */
40 /*---------------- Registers ----------------*/
42 const RRegUniverse
* getRRegUniverse_MIPS ( Bool mode64
)
44 /* The real-register universe is a big constant, so we just want to
45 initialise it once. rRegUniverse_MIPS_initted values: 0=not initted,
46 1=initted for 32-bit-mode, 2=initted for 64-bit-mode */
47 static RRegUniverse rRegUniverse_MIPS
;
48 static UInt rRegUniverse_MIPS_initted
= 0;
50 /* Handy shorthand, nothing more */
51 RRegUniverse
* ru
= &rRegUniverse_MIPS
;
53 /* This isn't thread-safe. Sigh. */
54 UInt howNeeded
= mode64
? 2 : 1;
55 if (LIKELY(rRegUniverse_MIPS_initted
== howNeeded
))
58 RRegUniverse__init(ru
);
60 /* Add the registers. The initial segment of this array must be
61 those available for allocation by reg-alloc, and those that
62 follow are not available for allocation. */
63 ru
->allocable_start
[(mode64
) ? HRcInt64
: HRcInt32
] = ru
->size
;
64 ru
->regs
[ru
->size
++] = hregMIPS_GPR16(mode64
);
65 ru
->regs
[ru
->size
++] = hregMIPS_GPR17(mode64
);
66 ru
->regs
[ru
->size
++] = hregMIPS_GPR18(mode64
);
67 ru
->regs
[ru
->size
++] = hregMIPS_GPR19(mode64
);
68 ru
->regs
[ru
->size
++] = hregMIPS_GPR20(mode64
);
69 ru
->regs
[ru
->size
++] = hregMIPS_GPR21(mode64
);
70 ru
->regs
[ru
->size
++] = hregMIPS_GPR22(mode64
);
72 ru
->regs
[ru
->size
++] = hregMIPS_GPR12(mode64
);
73 ru
->regs
[ru
->size
++] = hregMIPS_GPR13(mode64
);
74 ru
->regs
[ru
->size
++] = hregMIPS_GPR14(mode64
);
75 ru
->regs
[ru
->size
++] = hregMIPS_GPR15(mode64
);
76 ru
->regs
[ru
->size
++] = hregMIPS_GPR24(mode64
);
77 ru
->allocable_end
[(mode64
) ? HRcInt64
: HRcInt32
] = ru
->size
- 1;
79 /* s7 (=guest_state) */
80 ru
->allocable_start
[(mode64
) ? HRcFlt64
: HRcFlt32
] = ru
->size
;
81 ru
->regs
[ru
->size
++] = hregMIPS_F16(mode64
);
82 ru
->regs
[ru
->size
++] = hregMIPS_F18(mode64
);
83 ru
->regs
[ru
->size
++] = hregMIPS_F20(mode64
);
84 ru
->regs
[ru
->size
++] = hregMIPS_F22(mode64
);
85 ru
->regs
[ru
->size
++] = hregMIPS_F24(mode64
);
86 ru
->regs
[ru
->size
++] = hregMIPS_F26(mode64
);
87 ru
->regs
[ru
->size
++] = hregMIPS_F28(mode64
);
88 ru
->regs
[ru
->size
++] = hregMIPS_F30(mode64
);
89 ru
->allocable_end
[(mode64
) ? HRcFlt64
: HRcFlt32
] = ru
->size
- 1;
91 ru
->allocable_start
[HRcVec128
] = ru
->size
;
92 ru
->regs
[ru
->size
++] = hregMIPS_W16(mode64
);
93 ru
->regs
[ru
->size
++] = hregMIPS_W17(mode64
);
94 ru
->regs
[ru
->size
++] = hregMIPS_W18(mode64
);
95 ru
->regs
[ru
->size
++] = hregMIPS_W19(mode64
);
96 ru
->regs
[ru
->size
++] = hregMIPS_W20(mode64
);
97 ru
->regs
[ru
->size
++] = hregMIPS_W21(mode64
);
98 ru
->regs
[ru
->size
++] = hregMIPS_W22(mode64
);
99 ru
->regs
[ru
->size
++] = hregMIPS_W23(mode64
);
100 ru
->regs
[ru
->size
++] = hregMIPS_W24(mode64
);
101 ru
->regs
[ru
->size
++] = hregMIPS_W25(mode64
);
102 ru
->regs
[ru
->size
++] = hregMIPS_W26(mode64
);
103 ru
->regs
[ru
->size
++] = hregMIPS_W27(mode64
);
104 ru
->regs
[ru
->size
++] = hregMIPS_W28(mode64
);
105 ru
->regs
[ru
->size
++] = hregMIPS_W29(mode64
);
106 ru
->regs
[ru
->size
++] = hregMIPS_W30(mode64
);
107 ru
->regs
[ru
->size
++] = hregMIPS_W31(mode64
);
108 ru
->allocable_end
[HRcVec128
] = ru
->size
- 1;
111 /* Fake double floating point */
112 ru
->allocable_start
[HRcFlt64
] = ru
->size
;
113 ru
->regs
[ru
->size
++] = hregMIPS_D0(mode64
);
114 ru
->regs
[ru
->size
++] = hregMIPS_D1(mode64
);
115 ru
->regs
[ru
->size
++] = hregMIPS_D2(mode64
);
116 ru
->regs
[ru
->size
++] = hregMIPS_D3(mode64
);
117 ru
->regs
[ru
->size
++] = hregMIPS_D4(mode64
);
118 ru
->regs
[ru
->size
++] = hregMIPS_D5(mode64
);
119 ru
->regs
[ru
->size
++] = hregMIPS_D6(mode64
);
120 ru
->regs
[ru
->size
++] = hregMIPS_D7(mode64
);
121 ru
->allocable_end
[HRcFlt64
] = ru
->size
- 1;
124 ru
->allocable
= ru
->size
;
125 /* And other regs, not available to the allocator. */
127 ru
->regs
[ru
->size
++] = hregMIPS_HI(mode64
);
128 ru
->regs
[ru
->size
++] = hregMIPS_LO(mode64
);
129 ru
->regs
[ru
->size
++] = hregMIPS_GPR0(mode64
);
130 ru
->regs
[ru
->size
++] = hregMIPS_GPR1(mode64
);
131 ru
->regs
[ru
->size
++] = hregMIPS_GPR2(mode64
);
132 ru
->regs
[ru
->size
++] = hregMIPS_GPR3(mode64
);
133 ru
->regs
[ru
->size
++] = hregMIPS_GPR4(mode64
);
134 ru
->regs
[ru
->size
++] = hregMIPS_GPR5(mode64
);
135 ru
->regs
[ru
->size
++] = hregMIPS_GPR6(mode64
);
136 ru
->regs
[ru
->size
++] = hregMIPS_GPR7(mode64
);
137 ru
->regs
[ru
->size
++] = hregMIPS_GPR8(mode64
);
138 ru
->regs
[ru
->size
++] = hregMIPS_GPR9(mode64
);
139 ru
->regs
[ru
->size
++] = hregMIPS_GPR10(mode64
);
140 ru
->regs
[ru
->size
++] = hregMIPS_GPR11(mode64
);
141 ru
->regs
[ru
->size
++] = hregMIPS_GPR23(mode64
);
142 ru
->regs
[ru
->size
++] = hregMIPS_GPR25(mode64
);
143 ru
->regs
[ru
->size
++] = hregMIPS_GPR29(mode64
);
144 ru
->regs
[ru
->size
++] = hregMIPS_GPR31(mode64
);
146 rRegUniverse_MIPS_initted
= howNeeded
;
148 RRegUniverse__check_is_sane(ru
);
153 UInt
ppHRegMIPS(HReg reg
, Bool mode64
)
156 static const HChar
*ireg32_names
[35]
157 = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
158 "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
159 "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
160 "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
164 static const HChar
*freg32_names
[32]
165 = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
166 "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
167 "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
168 "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "f30", "$f31"
171 static const HChar
*freg64_names
[32]
172 = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
173 "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
176 static const HChar
*fvec128_names
[32]
177 = { "$w0", "$w1", "$w2", "$w3", "$w4", "$w5", "$w6", "$w7",
178 "$w8", "$w9", "$w10", "$w11", "$w12", "$w13", "$w14", "$w15",
179 "$w16", "$w17", "$w18", "$w19", "$w20", "$w21", "$w22", "$w23",
180 "$w24", "$w24", "$w26", "$w27", "$w28", "$w29", "$w30", "$w31"
183 /* Be generic for all virtual regs. */
184 if (hregIsVirtual(reg
)) {
188 /* But specific for real regs. */
189 vassert(hregClass(reg
) == HRcInt32
|| hregClass(reg
) == HRcInt64
||
190 hregClass(reg
) == HRcFlt32
|| hregClass(reg
) == HRcFlt64
||
191 hregClass(reg
) == HRcVec128
);
193 /* But specific for real regs. */
194 switch (hregClass(reg
)) {
196 r
= hregEncoding(reg
);
197 vassert(r
>= 0 && r
< 32);
198 return vex_printf("%s", ireg32_names
[r
]);
200 r
= hregEncoding (reg
);
201 vassert (r
>= 0 && r
< 32);
202 return vex_printf ("%s", ireg32_names
[r
]);
204 r
= hregEncoding(reg
);
205 vassert(r
>= 0 && r
< 32);
206 return vex_printf("%s", freg32_names
[r
]);
208 r
= hregEncoding(reg
);
209 vassert(r
>= 0 && r
< 32);
210 return vex_printf("%s", freg64_names
[r
]);
212 r
= hregEncoding(reg
);
213 vassert(r
>= 0 && r
< 32);
214 return vex_printf("%s", fvec128_names
[r
]);
216 vpanic("ppHRegMIPS");
222 /*----------------- Condition Codes ----------------------*/
224 const HChar
*showMIPSCondCode(MIPSCondCode cond
)
229 ret
= "EQ"; /* equal */
232 ret
= "NEQ"; /* not equal */
235 ret
= "GE"; /* >=u (Greater Than or Equal) */
238 ret
= "LT"; /* <u (lower) */
241 ret
= "MI"; /* minus (negative) */
244 ret
= "PL"; /* plus (zero or +ve) */
247 ret
= "VS"; /* overflow */
250 ret
= "VC"; /* no overflow */
253 ret
= "HI"; /* >u (higher) */
256 ret
= "LS"; /* <=u (lower or same) */
259 ret
= "GE"; /* >=s (signed greater or equal) */
262 ret
= "LT"; /* <s (signed less than) */
265 ret
= "GT"; /* >s (signed greater) */
268 ret
= "LE"; /* <=s (signed less or equal) */
271 ret
= "AL"; /* always (unconditional) */
274 ret
= "NV"; /* never (unconditional): */
277 vpanic("showMIPSCondCode");
283 const HChar
*showMIPSFpOp(MIPSFpOp op
)
419 #if (__mips_isa_rev >= 6)
471 vex_printf("Unknown op: %d", (Int
)op
);
472 vpanic("showMIPSFpOp");
478 /* Show move from/to fpr to/from gpr */
479 const HChar
* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op
)
486 case MFpGpMove_dmfc1
:
492 case MFpGpMove_dmtc1
:
496 vpanic("showMIPSFpGpMoveOp");
502 /* Show floating point move conditional */
503 const HChar
* showMIPSMoveCondOp ( MIPSMoveCondOp op
)
507 case MFpMoveCond_movns
:
510 case MFpMoveCond_movnd
:
529 vpanic("showMIPSFpMoveCondOp");
535 /* --------- MIPSAMode: memory address expressions. --------- */
537 MIPSAMode
*MIPSAMode_IR(Int idx
, HReg base
)
539 MIPSAMode
*am
= LibVEX_Alloc_inline(sizeof(MIPSAMode
));
541 am
->Mam
.IR
.base
= base
;
542 am
->Mam
.IR
.index
= idx
;
547 MIPSAMode
*MIPSAMode_RR(HReg idx
, HReg base
)
549 MIPSAMode
*am
= LibVEX_Alloc_inline(sizeof(MIPSAMode
));
551 am
->Mam
.RR
.base
= base
;
552 am
->Mam
.RR
.index
= idx
;
557 MIPSAMode
*dopyMIPSAMode(MIPSAMode
* am
)
562 ret
= MIPSAMode_IR(am
->Mam
.IR
.index
, am
->Mam
.IR
.base
);
565 ret
= MIPSAMode_RR(am
->Mam
.RR
.index
, am
->Mam
.RR
.base
);
568 vpanic("dopyMIPSAMode");
574 MIPSAMode
*nextMIPSAModeFloat(MIPSAMode
* am
)
579 ret
= MIPSAMode_IR(am
->Mam
.IR
.index
+ 4, am
->Mam
.IR
.base
);
582 /* We can't do anything with the RR case, so if it appears
583 we simply have to give up. */
586 vpanic("nextMIPSAModeFloat");
592 MIPSAMode
*nextMIPSAModeInt(MIPSAMode
* am
)
597 ret
= MIPSAMode_IR(am
->Mam
.IR
.index
+ 4, am
->Mam
.IR
.base
);
600 /* We can't do anything with the RR case, so if it appears
601 we simply have to give up. */
604 vpanic("nextMIPSAModeInt");
610 void ppMIPSAMode(MIPSAMode
* am
, Bool mode64
)
614 if (am
->Mam
.IR
.index
== 0)
617 vex_printf("%d(", (Int
) am
->Mam
.IR
.index
);
618 ppHRegMIPS(am
->Mam
.IR
.base
, mode64
);
622 ppHRegMIPS(am
->Mam
.RR
.base
, mode64
);
624 ppHRegMIPS(am
->Mam
.RR
.index
, mode64
);
627 vpanic("ppMIPSAMode");
632 static void addRegUsage_MIPSAMode(HRegUsage
* u
, MIPSAMode
* am
)
636 addHRegUse(u
, HRmRead
, am
->Mam
.IR
.base
);
639 addHRegUse(u
, HRmRead
, am
->Mam
.RR
.base
);
640 addHRegUse(u
, HRmRead
, am
->Mam
.RR
.index
);
643 vpanic("addRegUsage_MIPSAMode");
648 static void mapRegs_MIPSAMode(HRegRemap
* m
, MIPSAMode
* am
)
652 am
->Mam
.IR
.base
= lookupHRegRemap(m
, am
->Mam
.IR
.base
);
655 am
->Mam
.RR
.base
= lookupHRegRemap(m
, am
->Mam
.RR
.base
);
656 am
->Mam
.RR
.index
= lookupHRegRemap(m
, am
->Mam
.RR
.index
);
659 vpanic("mapRegs_MIPSAMode");
664 /* --------- Operand, which can be a reg or a u16/s16. --------- */
666 MIPSRH
*MIPSRH_Imm(Bool syned
, UShort imm16
)
668 MIPSRH
*op
= LibVEX_Alloc_inline(sizeof(MIPSRH
));
670 op
->Mrh
.Imm
.syned
= syned
;
671 op
->Mrh
.Imm
.imm16
= imm16
;
672 /* If this is a signed value, ensure it's not -32768, so that we
673 are guaranteed always to be able to negate if needed. */
675 vassert(imm16
!= 0x8000);
676 vassert(syned
== True
|| syned
== False
);
680 MIPSRH
*MIPSRH_Reg(HReg reg
)
682 MIPSRH
*op
= LibVEX_Alloc_inline(sizeof(MIPSRH
));
684 op
->Mrh
.Reg
.reg
= reg
;
688 void ppMIPSRH(MIPSRH
* op
, Bool mode64
)
690 MIPSRHTag tag
= op
->tag
;
693 if (op
->Mrh
.Imm
.syned
)
694 vex_printf("%d", (Int
) (Short
) op
->Mrh
.Imm
.imm16
);
696 vex_printf("%u", (UInt
) (UShort
) op
->Mrh
.Imm
.imm16
);
699 ppHRegMIPS(op
->Mrh
.Reg
.reg
, mode64
);
707 /* An MIPSRH can only be used in a "read" context (what would it mean
708 to write or modify a literal?) and so we enumerate its registers
710 static void addRegUsage_MIPSRH(HRegUsage
* u
, MIPSRH
* op
)
716 addHRegUse(u
, HRmRead
, op
->Mrh
.Reg
.reg
);
719 vpanic("addRegUsage_MIPSRH");
724 static void mapRegs_MIPSRH(HRegRemap
* m
, MIPSRH
* op
)
730 op
->Mrh
.Reg
.reg
= lookupHRegRemap(m
, op
->Mrh
.Reg
.reg
);
733 vpanic("mapRegs_MIPSRH");
738 /* --------- Instructions. --------- */
740 const HChar
*showMIPSUnaryOp(MIPSUnaryOp op
)
760 vpanic("showMIPSUnaryOp");
766 const HChar
*showMIPSAluOp(MIPSAluOp op
, Bool immR
)
771 ret
= immR
? "addiu" : "addu";
777 ret
= immR
? "andi" : "and";
780 ret
= immR
? "ori" : "or";
783 vassert(immR
== False
); /*there's no nor with an immediate operand!? */
787 ret
= immR
? "xori" : "xor";
790 ret
= immR
? "daddiu" : "dadd";
793 ret
= immR
? "dsubi" : "dsub";
796 ret
= immR
? "slti" : "slt";
799 vpanic("showMIPSAluOp");
805 const HChar
*showMIPSShftOp(MIPSShftOp op
, Bool immR
, Bool sz32
)
810 ret
= immR
? (sz32
? "sra" : "dsra") : (sz32
? "srav" : "dsrav");
813 ret
= immR
? (sz32
? "sll" : "dsll") : (sz32
? "sllv" : "dsllv");
816 ret
= immR
? (sz32
? "srl" : "dsrl") : (sz32
? "srlv" : "dsrlv");
819 vpanic("showMIPSShftOp");
825 const HChar
*showMIPSMaccOp(MIPSMaccOp op
, Bool variable
)
830 ret
= variable
? "madd" : "maddu";
833 ret
= variable
? "msub" : "msubu";
836 vpanic("showMIPSAccOp");
842 HChar
showMsaDF(MSADF df
) {
860 HChar
showMsaDFF(MSADFFlx df
, int op
) {
863 if (op
== MSA_MUL_Q
|| op
== MSA_MULR_Q
|| op
== MSA_FEXDO
) return 'w';
867 if (op
== MSA_MUL_Q
|| op
== MSA_MULR_Q
|| op
== MSA_FEXDO
) return 'h';
874 const HChar
*showMsaMI10op(MSAMI10Op op
) {
887 vpanic("showMsaMI10op");
894 const HChar
*showMsaElmOp(MSAELMOp op
) {
931 vpanic("showMsaElmOp");
938 const HChar
*showMsa2ROp(MSA2ROp op
) {
959 vpanic("showMsa2ROp");
966 const HChar
*showRotxOp(MIPSRotxOp op
) {
976 vpanic("showRotxOp");
983 const HChar
*showMsa2RFOp(MSA2RFOp op
) {
1032 vpanic("showMsa2RFOp");
1039 const HChar
*showMsa3ROp(MSA3ROp op
) {
1168 vpanic("showMsa3ROp");
1175 const HChar
*showMsaVecOp(MSAVECOp op
) {
1196 vpanic("showMsaVecOp");
1203 const HChar
*showMsaBitOp(MSABITOp op
) {
1228 vpanic("showMsaBitOp");
1235 const HChar
*showMsa3RFOp(MSA3RFOp op
) {
1312 vpanic("showMsa3RFOp");
1319 MIPSInstr
*MIPSInstr_LI(HReg dst
, ULong imm
)
1321 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1323 i
->Min
.LI
.dst
= dst
;
1324 i
->Min
.LI
.imm
= imm
;
1328 MIPSInstr
*MIPSInstr_Alu(MIPSAluOp op
, HReg dst
, HReg srcL
, MIPSRH
* srcR
)
1330 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1333 i
->Min
.Alu
.dst
= dst
;
1334 i
->Min
.Alu
.srcL
= srcL
;
1335 i
->Min
.Alu
.srcR
= srcR
;
1339 MIPSInstr
*MIPSInstr_Shft(MIPSShftOp op
, Bool sz32
, HReg dst
, HReg srcL
,
1342 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1344 i
->Min
.Shft
.op
= op
;
1345 i
->Min
.Shft
.sz32
= sz32
;
1346 i
->Min
.Shft
.dst
= dst
;
1347 i
->Min
.Shft
.srcL
= srcL
;
1348 i
->Min
.Shft
.srcR
= srcR
;
1352 MIPSInstr
*MIPSInstr_Unary(MIPSUnaryOp op
, HReg dst
, HReg src
)
1354 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1356 i
->Min
.Unary
.op
= op
;
1357 i
->Min
.Unary
.dst
= dst
;
1358 i
->Min
.Unary
.src
= src
;
1362 MIPSInstr
*MIPSInstr_Cmp(Bool syned
, Bool sz32
, HReg dst
, HReg srcL
, HReg srcR
,
1365 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1367 i
->Min
.Cmp
.syned
= syned
;
1368 i
->Min
.Cmp
.sz32
= sz32
;
1369 i
->Min
.Cmp
.dst
= dst
;
1370 i
->Min
.Cmp
.srcL
= srcL
;
1371 i
->Min
.Cmp
.srcR
= srcR
;
1372 i
->Min
.Cmp
.cond
= cond
;
1377 MIPSInstr
*MIPSInstr_Mul(HReg dst
, HReg srcL
, HReg srcR
)
1379 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1381 i
->Min
.Mul
.dst
= dst
;
1382 i
->Min
.Mul
.srcL
= srcL
;
1383 i
->Min
.Mul
.srcR
= srcR
;
1387 /* mult, multu / dmult, dmultu */
1388 MIPSInstr
*MIPSInstr_Mult(Bool syned
, HReg srcL
, HReg srcR
)
1390 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1392 i
->Min
.Mult
.syned
= syned
;
1393 i
->Min
.Mult
.srcL
= srcL
;
1394 i
->Min
.Mult
.srcR
= srcR
;
1398 /* ext / dext, dextm, dextu */
1399 MIPSInstr
*MIPSInstr_Ext(HReg dst
, HReg src
, UInt pos
, UInt size
)
1401 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1403 i
->Min
.Ext
.dst
= dst
;
1404 i
->Min
.Ext
.src
= src
;
1405 i
->Min
.Ext
.pos
= pos
;
1406 i
->Min
.Ext
.size
= size
;
1410 MIPSInstr
*MIPSInstr_Mulr6(Bool syned
, Bool sz32
, Bool low
, HReg dst
,
1411 HReg srcL
, HReg srcR
)
1413 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1415 i
->Min
.Mulr6
.syned
= syned
;
1416 i
->Min
.Mulr6
.sz32
= sz32
; /* True = 32 bits */
1417 i
->Min
.Mulr6
.low
= low
;
1418 i
->Min
.Mulr6
.dst
= dst
;
1419 i
->Min
.Mulr6
.srcL
= srcL
;
1420 i
->Min
.Mulr6
.srcR
= srcR
;
1425 MIPSInstr
*MIPSInstr_Msub(Bool syned
, HReg srcL
, HReg srcR
)
1427 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1430 i
->Min
.Macc
.op
= Macc_SUB
;
1431 i
->Min
.Macc
.syned
= syned
;
1432 i
->Min
.Macc
.srcL
= srcL
;
1433 i
->Min
.Macc
.srcR
= srcR
;
1438 MIPSInstr
*MIPSInstr_Madd(Bool syned
, HReg srcL
, HReg srcR
)
1440 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1443 i
->Min
.Macc
.op
= Macc_ADD
;
1444 i
->Min
.Macc
.syned
= syned
;
1445 i
->Min
.Macc
.srcL
= srcL
;
1446 i
->Min
.Macc
.srcR
= srcR
;
1451 MIPSInstr
*MIPSInstr_Div(Bool syned
, Bool sz32
, HReg srcL
, HReg srcR
)
1453 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1455 i
->Min
.Div
.syned
= syned
;
1456 i
->Min
.Div
.sz32
= sz32
; /* True = 32 bits */
1457 i
->Min
.Div
.srcL
= srcL
;
1458 i
->Min
.Div
.srcR
= srcR
;
1462 MIPSInstr
*MIPSInstr_Divr6(Bool syned
, Bool sz32
, Bool mod
, HReg dst
,
1463 HReg srcL
, HReg srcR
)
1465 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1467 i
->Min
.Divr6
.syned
= syned
;
1468 i
->Min
.Divr6
.sz32
= sz32
; /* True = 32 bits */
1469 i
->Min
.Divr6
.mod
= mod
;
1470 i
->Min
.Divr6
.dst
= dst
;
1471 i
->Min
.Divr6
.srcL
= srcL
;
1472 i
->Min
.Divr6
.srcR
= srcR
;
1476 MIPSInstr
*MIPSInstr_Call ( MIPSCondCode cond
, Addr64 target
, UInt argiregs
,
1477 HReg src
, RetLoc rloc
)
1480 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1482 i
->Min
.Call
.cond
= cond
;
1483 i
->Min
.Call
.target
= target
;
1484 i
->Min
.Call
.argiregs
= argiregs
;
1485 i
->Min
.Call
.src
= src
;
1486 i
->Min
.Call
.rloc
= rloc
;
1487 /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
1488 mask
= (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1489 | (1 << 10) | (1 << 11);
1490 vassert(0 == (argiregs
& ~mask
));
1491 vassert(is_sane_RetLoc(rloc
));
1495 MIPSInstr
*MIPSInstr_CallAlways ( MIPSCondCode cond
, Addr64 target
,
1496 UInt argiregs
, RetLoc rloc
)
1499 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1501 i
->Min
.Call
.cond
= cond
;
1502 i
->Min
.Call
.target
= target
;
1503 i
->Min
.Call
.argiregs
= argiregs
;
1504 i
->Min
.Call
.rloc
= rloc
;
1505 /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
1506 mask
= (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1507 | (1 << 10) | (1 << 11);
1508 vassert(0 == (argiregs
& ~mask
));
1509 vassert(is_sane_RetLoc(rloc
));
1513 MIPSInstr
*MIPSInstr_XDirect ( Addr64 dstGA
, MIPSAMode
* amPC
,
1514 MIPSCondCode cond
, Bool toFastEP
) {
1515 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1516 i
->tag
= Min_XDirect
;
1517 i
->Min
.XDirect
.dstGA
= dstGA
;
1518 i
->Min
.XDirect
.amPC
= amPC
;
1519 i
->Min
.XDirect
.cond
= cond
;
1520 i
->Min
.XDirect
.toFastEP
= toFastEP
;
1524 MIPSInstr
*MIPSInstr_XIndir ( HReg dstGA
, MIPSAMode
* amPC
,
1525 MIPSCondCode cond
) {
1526 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1527 i
->tag
= Min_XIndir
;
1528 i
->Min
.XIndir
.dstGA
= dstGA
;
1529 i
->Min
.XIndir
.amPC
= amPC
;
1530 i
->Min
.XIndir
.cond
= cond
;
1534 MIPSInstr
*MIPSInstr_XAssisted ( HReg dstGA
, MIPSAMode
* amPC
,
1535 MIPSCondCode cond
, IRJumpKind jk
) {
1536 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1537 i
->tag
= Min_XAssisted
;
1538 i
->Min
.XAssisted
.dstGA
= dstGA
;
1539 i
->Min
.XAssisted
.amPC
= amPC
;
1540 i
->Min
.XAssisted
.cond
= cond
;
1541 i
->Min
.XAssisted
.jk
= jk
;
1545 MIPSInstr
*MIPSInstr_Load(UChar sz
, HReg dst
, MIPSAMode
* src
, Bool mode64
)
1547 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1549 i
->Min
.Load
.sz
= sz
;
1550 i
->Min
.Load
.src
= src
;
1551 i
->Min
.Load
.dst
= dst
;
1552 vassert(sz
== 1 || sz
== 2 || sz
== 4 || sz
== 8);
1559 MIPSInstr
*MIPSInstr_Store(UChar sz
, MIPSAMode
* dst
, HReg src
, Bool mode64
)
1561 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1563 i
->Min
.Store
.sz
= sz
;
1564 i
->Min
.Store
.src
= src
;
1565 i
->Min
.Store
.dst
= dst
;
1566 vassert(sz
== 1 || sz
== 2 || sz
== 4 || sz
== 8);
1573 MIPSInstr
*MIPSInstr_LoadL(UChar sz
, HReg dst
, MIPSAMode
* src
, Bool mode64
)
1575 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1577 i
->Min
.LoadL
.sz
= sz
;
1578 i
->Min
.LoadL
.src
= src
;
1579 i
->Min
.LoadL
.dst
= dst
;
1580 vassert(sz
== 4 || sz
== 8);
1587 MIPSInstr
*MIPSInstr_Cas(UChar sz
, HReg old
, HReg addr
,
1588 HReg expd
, HReg data
, Bool mode64
)
1590 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1593 i
->Min
.Cas
.old
= old
;
1594 i
->Min
.Cas
.addr
= addr
;
1595 i
->Min
.Cas
.expd
= expd
;
1596 i
->Min
.Cas
.data
= data
;
1597 vassert(sz
== 1 || sz
== 2 || sz
== 4 || sz
== 8);
1604 MIPSInstr
*MIPSInstr_StoreC(UChar sz
, MIPSAMode
* dst
, HReg src
, Bool mode64
)
1606 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1607 i
->tag
= Min_StoreC
;
1608 i
->Min
.StoreC
.sz
= sz
;
1609 i
->Min
.StoreC
.src
= src
;
1610 i
->Min
.StoreC
.dst
= dst
;
1611 vassert(sz
== 4 || sz
== 8);
1618 MIPSInstr
*MIPSInstr_Mthi(HReg src
)
1620 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1622 i
->Min
.MtHL
.src
= src
;
1626 MIPSInstr
*MIPSInstr_Mtlo(HReg src
)
1628 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1630 i
->Min
.MtHL
.src
= src
;
1634 MIPSInstr
*MIPSInstr_Mfhi(HReg dst
)
1636 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1638 i
->Min
.MfHL
.dst
= dst
;
1642 MIPSInstr
*MIPSInstr_Mflo(HReg dst
)
1644 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1646 i
->Min
.MfHL
.dst
= dst
;
1650 /* Read/Write Link Register */
1651 MIPSInstr
*MIPSInstr_RdWrLR(Bool wrLR
, HReg gpr
)
1653 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1654 i
->tag
= Min_RdWrLR
;
1655 i
->Min
.RdWrLR
.wrLR
= wrLR
;
1656 i
->Min
.RdWrLR
.gpr
= gpr
;
1660 MIPSInstr
*MIPSInstr_FpLdSt(Bool isLoad
, UChar sz
, HReg reg
, MIPSAMode
* addr
)
1662 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1663 i
->tag
= Min_FpLdSt
;
1664 i
->Min
.FpLdSt
.isLoad
= isLoad
;
1665 i
->Min
.FpLdSt
.sz
= sz
;
1666 i
->Min
.FpLdSt
.reg
= reg
;
1667 i
->Min
.FpLdSt
.addr
= addr
;
1668 vassert(sz
== 4 || sz
== 8);
1672 MIPSInstr
*MIPSInstr_FpUnary(MIPSFpOp op
, HReg dst
, HReg src
)
1674 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1675 i
->tag
= Min_FpUnary
;
1676 i
->Min
.FpUnary
.op
= op
;
1677 i
->Min
.FpUnary
.dst
= dst
;
1678 i
->Min
.FpUnary
.src
= src
;
1682 MIPSInstr
*MIPSInstr_FpBinary(MIPSFpOp op
, HReg dst
, HReg srcL
, HReg srcR
)
1684 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1685 i
->tag
= Min_FpBinary
;
1686 i
->Min
.FpBinary
.op
= op
;
1687 i
->Min
.FpBinary
.dst
= dst
;
1688 i
->Min
.FpBinary
.srcL
= srcL
;
1689 i
->Min
.FpBinary
.srcR
= srcR
;
1693 MIPSInstr
*MIPSInstr_FpTernary ( MIPSFpOp op
, HReg dst
, HReg src1
, HReg src2
,
1696 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1697 i
->tag
= Min_FpTernary
;
1698 i
->Min
.FpTernary
.op
= op
;
1699 i
->Min
.FpTernary
.dst
= dst
;
1700 i
->Min
.FpTernary
.src1
= src1
;
1701 i
->Min
.FpTernary
.src2
= src2
;
1702 i
->Min
.FpTernary
.src3
= src3
;
1706 MIPSInstr
*MIPSInstr_FpConvert(MIPSFpOp op
, HReg dst
, HReg src
)
1708 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1709 i
->tag
= Min_FpConvert
;
1710 i
->Min
.FpConvert
.op
= op
;
1711 i
->Min
.FpConvert
.dst
= dst
;
1712 i
->Min
.FpConvert
.src
= src
;
1717 MIPSInstr
*MIPSInstr_FpCompare(MIPSFpOp op
, HReg dst
, HReg srcL
, HReg srcR
)
1719 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1720 i
->tag
= Min_FpCompare
;
1721 i
->Min
.FpCompare
.op
= op
;
1722 i
->Min
.FpCompare
.dst
= dst
;
1723 i
->Min
.FpCompare
.srcL
= srcL
;
1724 i
->Min
.FpCompare
.srcR
= srcR
;
1728 MIPSInstr
*MIPSInstr_FpMinMax(MIPSFpOp op
, HReg dst
, HReg srcL
, HReg srcR
)
1730 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1731 i
->tag
= Min_FpMinMax
;
1732 i
->Min
.FpMinMax
.op
= op
;
1733 i
->Min
.FpMinMax
.dst
= dst
;
1734 i
->Min
.FpMinMax
.srcL
= srcL
;
1735 i
->Min
.FpMinMax
.srcR
= srcR
;
1740 MIPSInstr
*MIPSInstr_MtFCSR(HReg src
)
1742 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1743 i
->tag
= Min_MtFCSR
;
1744 i
->Min
.MtFCSR
.src
= src
;
1748 MIPSInstr
*MIPSInstr_MfFCSR(HReg dst
)
1750 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1751 i
->tag
= Min_MfFCSR
;
1752 i
->Min
.MfFCSR
.dst
= dst
;
1756 MIPSInstr
*MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op
, HReg dst
, HReg src
)
1758 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1759 i
->tag
= Min_FpGpMove
;
1760 i
->Min
.FpGpMove
.op
= op
;
1761 i
->Min
.FpGpMove
.dst
= dst
;
1762 i
->Min
.FpGpMove
.src
= src
;
1766 MIPSInstr
*MIPSInstr_MoveCond ( MIPSMoveCondOp op
, HReg dst
, HReg src
,
1769 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1770 i
->tag
= Min_MoveCond
;
1771 i
->Min
.MoveCond
.op
= op
;
1772 i
->Min
.MoveCond
.dst
= dst
;
1773 i
->Min
.MoveCond
.src
= src
;
1774 i
->Min
.MoveCond
.cond
= cond
;
1778 MIPSInstr
*MIPSInstr_EvCheck ( MIPSAMode
* amCounter
,
1779 MIPSAMode
* amFailAddr
) {
1780 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1781 i
->tag
= Min_EvCheck
;
1782 i
->Min
.EvCheck
.amCounter
= amCounter
;
1783 i
->Min
.EvCheck
.amFailAddr
= amFailAddr
;
1787 MIPSInstr
* MIPSInstr_ProfInc ( void ) {
1788 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1789 i
->tag
= Min_ProfInc
;
1794 MIPSInstr
* MIPSInstr_MsaMi10(MSAMI10Op op
, UInt s10
, HReg rs
, HReg wd
,
1796 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1798 i
->Min
.MsaMi10
.op
= op
;
1799 i
->Min
.MsaMi10
.s10
= s10
;
1800 i
->Min
.MsaMi10
.rs
= rs
;
1801 i
->Min
.MsaMi10
.wd
= wd
;
1802 i
->Min
.MsaMi10
.df
= df
;
1806 MIPSInstr
* MIPSInstr_MsaElm(MSAELMOp op
, HReg ws
, HReg wd
, UInt dfn
) {
1807 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1809 i
->Min
.MsaElm
.op
= op
;
1810 i
->Min
.MsaElm
.ws
= ws
;
1811 i
->Min
.MsaElm
.wd
= wd
;
1812 i
->Min
.MsaElm
.dfn
= dfn
;
1816 MIPSInstr
* MIPSInstr_Msa2R(MSA2ROp op
, MSADF df
, HReg ws
, HReg wd
) {
1817 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1819 i
->Min
.Msa2R
.op
= op
;
1820 i
->Min
.Msa2R
.df
= df
;
1821 i
->Min
.Msa2R
.ws
= ws
;
1822 i
->Min
.Msa2R
.wd
= wd
;
1826 MIPSInstr
* MIPSInstr_Msa3R(MSA3ROp op
, MSADF df
, HReg wd
, HReg ws
, HReg wt
) {
1827 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1829 i
->Min
.Msa3R
.op
= op
;
1830 i
->Min
.Msa3R
.df
= df
;
1831 i
->Min
.Msa3R
.wd
= wd
;
1832 i
->Min
.Msa3R
.wt
= wt
;
1833 i
->Min
.Msa3R
.ws
= ws
;
1837 MIPSInstr
* MIPSInstr_MsaVec(MSAVECOp op
, HReg wd
, HReg ws
, HReg wt
) {
1838 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1840 i
->Min
.MsaVec
.op
= op
;
1841 i
->Min
.MsaVec
.wd
= wd
;
1842 i
->Min
.MsaVec
.wt
= wt
;
1843 i
->Min
.MsaVec
.ws
= ws
;
1847 MIPSInstr
* MIPSInstr_MsaBit(MSABITOp op
, MSADF df
, UChar ms
, HReg ws
, HReg wd
) {
1848 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1850 i
->Min
.MsaBit
.op
= op
;
1851 i
->Min
.MsaBit
.df
= df
;
1852 i
->Min
.MsaBit
.ws
= ws
;
1853 i
->Min
.MsaBit
.wd
= wd
;
1854 i
->Min
.MsaBit
.ms
= ms
;
1858 MIPSInstr
* MIPSInstr_Msa3RF(MSA3RFOp op
, MSADFFlx df
, HReg wd
, HReg ws
,
1860 MIPSInstr
* i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1862 i
->Min
.Msa3RF
.op
= op
;
1863 i
->Min
.Msa3RF
.df
= df
;
1864 i
->Min
.Msa3RF
.wd
= wd
;
1865 i
->Min
.Msa3RF
.wt
= wt
;
1866 i
->Min
.Msa3RF
.ws
= ws
;
1870 MIPSInstr
* MIPSInstr_Msa2RF(MSA2RFOp op
, MSADFFlx df
, HReg wd
, HReg ws
) {
1871 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1873 i
->Min
.Msa2RF
.op
= op
;
1874 i
->Min
.Msa2RF
.df
= df
;
1875 i
->Min
.Msa2RF
.wd
= wd
;
1876 i
->Min
.Msa2RF
.ws
= ws
;
1880 MIPSInstr
* MIPSInstr_Bitswap(MIPSRotxOp op
, HReg rd
, HReg rt
, HReg shift
, HReg shiftx
, HReg stripe
) {
1881 MIPSInstr
*i
= LibVEX_Alloc_inline(sizeof(MIPSInstr
));
1883 i
->Min
.Rotx
.op
= op
;
1884 i
->Min
.Rotx
.rd
= rd
;
1885 i
->Min
.Rotx
.rt
= rt
;
1886 i
->Min
.Rotx
.shift
= shift
;
1887 i
->Min
.Rotx
.shiftx
= shiftx
;
1888 i
->Min
.Rotx
.stripe
= stripe
;
1892 /* -------- Pretty Print instructions ------------- */
1893 static void ppLoadImm(HReg dst
, ULong imm
, Bool mode64
)
1896 ppHRegMIPS(dst
, mode64
);
1897 vex_printf(",0x%016llx", imm
);
1900 static void MSAdfn(UInt dfn
, MSADF
* df
, UInt
* n
) {
1901 if ((dfn
& 0x3e) == MSA_DFN_D
) {
1907 if ((dfn
& 0x3c) == MSA_DFN_W
) {
1913 if ((dfn
& 0x38) == MSA_DFN_H
) {
1923 void ppMIPSInstr(const MIPSInstr
* i
, Bool mode64
)
1927 ppLoadImm(i
->Min
.LI
.dst
, i
->Min
.LI
.imm
, mode64
);
1930 HReg r_srcL
= i
->Min
.Alu
.srcL
;
1931 MIPSRH
*rh_srcR
= i
->Min
.Alu
.srcR
;
1933 vex_printf("%s ", showMIPSAluOp(i
->Min
.Alu
.op
,
1934 toBool(rh_srcR
->tag
== Mrh_Imm
)));
1935 ppHRegMIPS(i
->Min
.Alu
.dst
, mode64
);
1937 ppHRegMIPS(r_srcL
, mode64
);
1939 ppMIPSRH(rh_srcR
, mode64
);
1943 HReg r_srcL
= i
->Min
.Shft
.srcL
;
1944 MIPSRH
*rh_srcR
= i
->Min
.Shft
.srcR
;
1945 vex_printf("%s ", showMIPSShftOp(i
->Min
.Shft
.op
,
1946 toBool(rh_srcR
->tag
== Mrh_Imm
),
1948 ppHRegMIPS(i
->Min
.Shft
.dst
, mode64
);
1950 ppHRegMIPS(r_srcL
, mode64
);
1952 ppMIPSRH(rh_srcR
, mode64
);
1956 HReg r_src
= i
->Min
.Rotx
.rt
;
1957 vex_printf("rotx ");
1958 ppHRegMIPS(i
->Min
.Rotx
.rd
, mode64
);
1960 ppHRegMIPS(r_src
, mode64
);
1964 vex_printf("%s ", showMIPSUnaryOp(i
->Min
.Unary
.op
));
1965 ppHRegMIPS(i
->Min
.Unary
.dst
, mode64
);
1967 ppHRegMIPS(i
->Min
.Unary
.src
, mode64
);
1971 vex_printf("word_compare ");
1972 ppHRegMIPS(i
->Min
.Cmp
.dst
, mode64
);
1973 vex_printf(" = %s ( ", showMIPSCondCode(i
->Min
.Cmp
.cond
));
1974 ppHRegMIPS(i
->Min
.Cmp
.srcL
, mode64
);
1976 ppHRegMIPS(i
->Min
.Cmp
.srcR
, mode64
);
1983 ppHRegMIPS(i
->Min
.Mul
.dst
, mode64
);
1985 ppHRegMIPS(i
->Min
.Mul
.srcL
, mode64
);
1987 ppHRegMIPS(i
->Min
.Mul
.srcR
, mode64
);
1991 vex_printf("%s%s ", mode64
? "dmult" : "mult",
1992 i
->Min
.Mult
.syned
? "" : "u");
1993 ppHRegMIPS(i
->Min
.Mult
.srcL
, mode64
);
1995 ppHRegMIPS(i
->Min
.Mult
.srcR
, mode64
);
2000 vassert(i
->Min
.Ext
.pos
< 32);
2001 vassert(i
->Min
.Ext
.size
> 0);
2002 vassert(i
->Min
.Ext
.size
<= 32);
2003 vassert(i
->Min
.Ext
.size
+ i
->Min
.Ext
.pos
> 0);
2004 vassert(i
->Min
.Ext
.size
+ i
->Min
.Ext
.pos
<= 63);
2005 vex_printf("dext ");
2006 ppHRegMIPS(i
->Min
.Ext
.dst
, mode64
);
2008 ppHRegMIPS(i
->Min
.Ext
.src
, mode64
);
2009 vex_printf(", %u, %u", i
->Min
.Ext
.pos
, i
->Min
.Ext
.size
);
2013 if(i
->Min
.Mulr6
.sz32
) {
2014 if(i
->Min
.Mulr6
.low
)vex_printf("mul");
2015 else vex_printf("muh");
2016 if(i
->Min
.Mulr6
.syned
)vex_printf("u ");
2017 else vex_printf(" ");
2019 if(i
->Min
.Mulr6
.low
)
2020 vex_printf("%s%s ", "dmul",
2021 i
->Min
.Mulr6
.syned
? "" : "u");
2023 vex_printf("%s%s ","dmuh",
2024 i
->Min
.Mulr6
.syned
? "" : "u");
2026 ppHRegMIPS(i
->Min
.Mulr6
.dst
, mode64
);
2028 ppHRegMIPS(i
->Min
.Mulr6
.srcL
, mode64
);
2030 ppHRegMIPS(i
->Min
.Mulr6
.srcR
, mode64
);
2034 vex_printf("mthi ");
2035 ppHRegMIPS(i
->Min
.MtHL
.src
, mode64
);
2039 vex_printf("mtlo ");
2040 ppHRegMIPS(i
->Min
.MtHL
.src
, mode64
);
2044 vex_printf("mfhi ");
2045 ppHRegMIPS(i
->Min
.MfHL
.dst
, mode64
);
2049 vex_printf("mflo ");
2050 ppHRegMIPS(i
->Min
.MfHL
.dst
, mode64
);
2054 vex_printf("%s ", showMIPSMaccOp(i
->Min
.Macc
.op
, i
->Min
.Macc
.syned
));
2055 ppHRegMIPS(i
->Min
.Macc
.srcL
, mode64
);
2057 ppHRegMIPS(i
->Min
.Macc
.srcR
, mode64
);
2061 if (!i
->Min
.Div
.sz32
)
2064 vex_printf("%s ", i
->Min
.Div
.syned
? "s" : "u");
2065 ppHRegMIPS(i
->Min
.Div
.srcL
, mode64
);
2067 ppHRegMIPS(i
->Min
.Div
.srcR
, mode64
);
2071 if(i
->Min
.Divr6
.sz32
) {
2072 if(i
->Min
.Divr6
.mod
)vex_printf("mod");
2073 else vex_printf("div");
2074 if(i
->Min
.Divr6
.syned
)vex_printf("u ");
2075 else vex_printf(" ");
2077 if(i
->Min
.Divr6
.mod
)
2078 vex_printf("%s%s ", "dmod",
2079 i
->Min
.Divr6
.syned
? "" : "u");
2081 vex_printf("%s%s ","ddiv",
2082 i
->Min
.Divr6
.syned
? "" : "u");
2084 ppHRegMIPS(i
->Min
.Divr6
.dst
, mode64
);
2086 ppHRegMIPS(i
->Min
.Divr6
.srcL
, mode64
);
2088 ppHRegMIPS(i
->Min
.Divr6
.srcR
, mode64
);
2093 vex_printf("call: ");
2094 if (i
->Min
.Call
.cond
!= MIPScc_AL
) {
2095 vex_printf("if (%s) ", showMIPSCondCode(i
->Min
.Call
.cond
));
2099 vex_printf(" addiu $29, $29, -16");
2101 ppLoadImm(hregMIPS_GPR25(mode64
), i
->Min
.Call
.target
, mode64
);
2103 vex_printf(" ; jarl $31, $25; # args [");
2104 for (n
= 0; n
< 32; n
++) {
2105 if (i
->Min
.Call
.argiregs
& (1 << n
)) {
2106 vex_printf("$%d", n
);
2107 if ((i
->Min
.Call
.argiregs
>> n
) > 1)
2111 vex_printf("] nop; ");
2113 vex_printf("addiu $29, $29, 16; ]");
2118 vex_printf("(xDirect) ");
2119 vex_printf("if (guest_COND.%s) { ",
2120 showMIPSCondCode(i
->Min
.XDirect
.cond
));
2121 vex_printf("move $9, 0x%x,", (UInt
)i
->Min
.XDirect
.dstGA
);
2122 vex_printf("; sw $9, ");
2123 ppMIPSAMode(i
->Min
.XDirect
.amPC
, mode64
);
2124 vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
2125 i
->Min
.XDirect
.toFastEP
? "fast" : "slow");
2128 vex_printf("(xIndir) ");
2129 vex_printf("if (guest_COND.%s) { sw ",
2130 showMIPSCondCode(i
->Min
.XIndir
.cond
));
2131 ppHRegMIPS(i
->Min
.XIndir
.dstGA
, mode64
);
2133 ppMIPSAMode(i
->Min
.XIndir
.amPC
, mode64
);
2134 vex_printf("; move $9, $disp_indir; jalr $9; nop}");
2137 vex_printf("(xAssisted) ");
2138 vex_printf("if (guest_COND.%s) { ",
2139 showMIPSCondCode(i
->Min
.XAssisted
.cond
));
2141 ppHRegMIPS(i
->Min
.XAssisted
.dstGA
, mode64
);
2143 ppMIPSAMode(i
->Min
.XAssisted
.amPC
, mode64
);
2144 vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
2145 (Int
)i
->Min
.XAssisted
.jk
);
2146 vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
2149 Bool idxd
= toBool(i
->Min
.Load
.src
->tag
== Mam_RR
);
2150 UChar sz
= i
->Min
.Load
.sz
;
2151 HChar c_sz
= sz
== 1 ? 'b' : sz
== 2 ? 'h' : sz
== 4 ? 'w' : 'd';
2152 vex_printf("l%c%s ", c_sz
, idxd
? "x" : "");
2153 ppHRegMIPS(i
->Min
.Load
.dst
, mode64
);
2155 ppMIPSAMode(i
->Min
.Load
.src
, mode64
);
2159 UChar sz
= i
->Min
.Store
.sz
;
2160 Bool idxd
= toBool(i
->Min
.Store
.dst
->tag
== Mam_RR
);
2161 HChar c_sz
= sz
== 1 ? 'b' : sz
== 2 ? 'h' : sz
== 4 ? 'w' : 'd';
2162 vex_printf("s%c%s ", c_sz
, idxd
? "x" : "");
2163 ppHRegMIPS(i
->Min
.Store
.src
, mode64
);
2165 ppMIPSAMode(i
->Min
.Store
.dst
, mode64
);
2170 ppHRegMIPS(i
->Min
.LoadL
.dst
, mode64
);
2172 ppMIPSAMode(i
->Min
.LoadL
.src
, mode64
);
2176 Bool sz8
= toBool(i
->Min
.Cas
.sz
== 8);
2178 * ll(d) old, 0(addr)
2179 * bne old, expd, end
2181 * (d)addiu old, old, 1
2182 * sc(d) data, 0(addr)
2183 * movn old, expd, data
2186 // ll(d) old, 0(addr)
2187 vex_printf("cas: ");
2189 vex_printf("%s ", sz8
? "lld" : "ll");
2190 ppHRegMIPS(i
->Min
.Cas
.old
, mode64
);
2192 ppHRegMIPS(i
->Min
.Cas
.addr
, mode64
);
2196 ppHRegMIPS(i
->Min
.Cas
.old
, mode64
);
2198 ppHRegMIPS(i
->Min
.Cas
.expd
, mode64
);
2199 vex_printf(", end\n");
2201 vex_printf("nop\n");
2203 vex_printf("%s ", sz8
? "daddiu" : "addiu");
2204 ppHRegMIPS(i
->Min
.Cas
.old
, mode64
);
2206 ppHRegMIPS(i
->Min
.Cas
.old
, mode64
);
2207 vex_printf(", 1\n");
2209 vex_printf("%s ", sz8
? "scd" : "sc");
2210 ppHRegMIPS(i
->Min
.Cas
.data
, mode64
);
2212 ppHRegMIPS(i
->Min
.Cas
.addr
, mode64
);
2215 vex_printf("movn ");
2216 ppHRegMIPS(i
->Min
.Cas
.old
, mode64
);
2218 ppHRegMIPS(i
->Min
.Cas
.expd
, mode64
);
2220 ppHRegMIPS(i
->Min
.Cas
.data
, mode64
);
2221 vex_printf("\nend:");
2226 ppHRegMIPS(i
->Min
.StoreC
.src
, mode64
);
2228 ppMIPSAMode(i
->Min
.StoreC
.dst
, mode64
);
2232 vex_printf("%s ", i
->Min
.RdWrLR
.wrLR
? "mtlr" : "mflr");
2233 ppHRegMIPS(i
->Min
.RdWrLR
.gpr
, mode64
);
2237 vex_printf("%s ", showMIPSFpOp(i
->Min
.FpUnary
.op
));
2238 ppHRegMIPS(i
->Min
.FpUnary
.dst
, mode64
);
2240 ppHRegMIPS(i
->Min
.FpUnary
.src
, mode64
);
2243 vex_printf("%s", showMIPSFpOp(i
->Min
.FpBinary
.op
));
2244 ppHRegMIPS(i
->Min
.FpBinary
.dst
, mode64
);
2246 ppHRegMIPS(i
->Min
.FpBinary
.srcL
, mode64
);
2248 ppHRegMIPS(i
->Min
.FpBinary
.srcR
, mode64
);
2251 vex_printf("%s", showMIPSFpOp(i
->Min
.FpTernary
.op
));
2252 ppHRegMIPS(i
->Min
.FpTernary
.dst
, mode64
);
2254 ppHRegMIPS(i
->Min
.FpTernary
.src1
, mode64
);
2256 ppHRegMIPS(i
->Min
.FpTernary
.src2
, mode64
);
2258 ppHRegMIPS(i
->Min
.FpTernary
.src3
, mode64
);
2261 vex_printf("%s", showMIPSFpOp(i
->Min
.FpConvert
.op
));
2262 ppHRegMIPS(i
->Min
.FpConvert
.dst
, mode64
);
2264 ppHRegMIPS(i
->Min
.FpConvert
.src
, mode64
);
2267 vex_printf("%s ", showMIPSFpOp(i
->Min
.FpCompare
.op
));
2268 ppHRegMIPS(i
->Min
.FpCompare
.srcL
, mode64
);
2270 ppHRegMIPS(i
->Min
.FpCompare
.srcR
, mode64
);
2273 vex_printf("%s ", showMIPSFpOp(i
->Min
.FpMinMax
.op
));
2274 ppHRegMIPS(i
->Min
.FpCompare
.srcL
, mode64
);
2276 ppHRegMIPS(i
->Min
.FpCompare
.srcR
, mode64
);
2279 vex_printf("%s ", showMIPSFpOp(i
->Min
.FpMulAcc
.op
));
2280 ppHRegMIPS(i
->Min
.FpMulAcc
.dst
, mode64
);
2282 ppHRegMIPS(i
->Min
.FpMulAcc
.srcML
, mode64
);
2284 ppHRegMIPS(i
->Min
.FpMulAcc
.srcMR
, mode64
);
2286 ppHRegMIPS(i
->Min
.FpMulAcc
.srcAcc
, mode64
);
2289 if (i
->Min
.FpLdSt
.sz
== 4) {
2290 if (i
->Min
.FpLdSt
.isLoad
) {
2291 vex_printf("lwc1 ");
2292 ppHRegMIPS(i
->Min
.FpLdSt
.reg
, mode64
);
2294 ppMIPSAMode(i
->Min
.FpLdSt
.addr
, mode64
);
2296 vex_printf("swc1 ");
2297 ppHRegMIPS(i
->Min
.FpLdSt
.reg
, mode64
);
2299 ppMIPSAMode(i
->Min
.FpLdSt
.addr
, mode64
);
2301 } else if (i
->Min
.FpLdSt
.sz
== 8) {
2302 if (i
->Min
.FpLdSt
.isLoad
) {
2303 vex_printf("ldc1 ");
2304 ppHRegMIPS(i
->Min
.FpLdSt
.reg
, mode64
);
2306 ppMIPSAMode(i
->Min
.FpLdSt
.addr
, mode64
);
2308 vex_printf("sdc1 ");
2309 ppHRegMIPS(i
->Min
.FpLdSt
.reg
, mode64
);
2311 ppMIPSAMode(i
->Min
.FpLdSt
.addr
, mode64
);
2317 vex_printf("ctc1 ");
2318 ppHRegMIPS(i
->Min
.MtFCSR
.src
, mode64
);
2319 vex_printf(", $31");
2323 vex_printf("cfc1 ");
2324 ppHRegMIPS(i
->Min
.MfFCSR
.dst
, mode64
);
2325 vex_printf(", $31");
2328 case Min_FpGpMove
: {
2329 vex_printf("%s ", showMIPSFpGpMoveOp(i
->Min
.FpGpMove
.op
));
2330 ppHRegMIPS(i
->Min
.FpGpMove
.dst
, mode64
);
2332 ppHRegMIPS(i
->Min
.FpGpMove
.src
, mode64
);
2335 case Min_MoveCond
: {
2336 vex_printf("%s", showMIPSMoveCondOp(i
->Min
.MoveCond
.op
));
2337 ppHRegMIPS(i
->Min
.MoveCond
.dst
, mode64
);
2339 ppHRegMIPS(i
->Min
.MoveCond
.src
, mode64
);
2341 ppHRegMIPS(i
->Min
.MoveCond
.cond
, mode64
);
2345 vex_printf("(evCheck) lw $9, ");
2346 ppMIPSAMode(i
->Min
.EvCheck
.amCounter
, mode64
);
2347 vex_printf("; addiu $9, $9, -1");
2348 vex_printf("; sw $9, ");
2349 ppMIPSAMode(i
->Min
.EvCheck
.amCounter
, mode64
);
2350 vex_printf("; bgez $t9, nofail; jalr *");
2351 ppMIPSAMode(i
->Min
.EvCheck
.amFailAddr
, mode64
);
2352 vex_printf("; nofail:");
2356 vex_printf("(profInc) move $9, ($NotKnownYet); "
2358 "daddiu $8, $8, 1; "
2361 vex_printf("(profInc) move $9, ($NotKnownYet); "
2371 Int imm
= (i
->Min
.MsaMi10
.s10
<< 22) >> 22;
2373 switch (i
->Min
.MsaMi10
.df
) {
2390 vex_printf("%s.%c ", showMsaMI10op(i
->Min
.MsaMi10
.op
),
2391 showMsaDF(i
->Min
.MsaMi10
.df
));
2392 ppHRegMIPS(i
->Min
.MsaMi10
.wd
, mode64
);
2393 vex_printf(", (%d)", imm
);
2394 ppHRegMIPS(i
->Min
.MsaMi10
.rs
, mode64
);
2399 switch (i
->Min
.MsaElm
.op
) {
2401 vex_printf("move.v ");
2402 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2404 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2410 MSAdfn(i
->Min
.MsaElm
.dfn
, &df
, &n
);
2411 vex_printf("%s.%c ", showMsaElmOp(i
->Min
.MsaElm
.op
),
2413 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2415 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2416 vex_printf("[%u]", n
);
2423 MSAdfn(i
->Min
.MsaElm
.dfn
, &df
, &n
);
2424 vex_printf("%s.%c ", showMsaElmOp(i
->Min
.MsaElm
.op
),
2426 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2427 vex_printf("[%u], ", n
);
2428 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2437 MSAdfn(i
->Min
.MsaElm
.dfn
, &df
, &n
);
2438 vex_printf("%s.%c ", showMsaElmOp(i
->Min
.MsaElm
.op
),
2440 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2442 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2443 vex_printf("[%u]", n
);
2450 MSAdfn(i
->Min
.MsaElm
.dfn
, &df
, &n
);
2451 vex_printf("%s.%c ", showMsaElmOp(i
->Min
.MsaElm
.op
),
2453 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2454 vex_printf("[%u], ", n
);
2455 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2460 vex_printf("cfcmsa ");
2461 ppHRegMIPS(i
->Min
.MsaElm
.wd
, mode64
);
2466 vex_printf("ctcmsa $1, ");
2467 ppHRegMIPS(i
->Min
.MsaElm
.ws
, mode64
);
2474 vex_printf("%s.%c ",
2475 showMsa3ROp(i
->Min
.Msa3R
.op
), showMsaDF(i
->Min
.Msa3R
.df
));
2476 ppHRegMIPS(i
->Min
.Msa3R
.wd
, mode64
);
2478 ppHRegMIPS(i
->Min
.Msa3R
.ws
, mode64
);
2480 ppHRegMIPS(i
->Min
.Msa3R
.wt
, mode64
);
2484 vex_printf("%s.%c ",
2485 showMsa2ROp(i
->Min
.Msa2R
.op
), showMsaDF(i
->Min
.Msa2R
.df
));
2486 ppHRegMIPS(i
->Min
.Msa2R
.wd
, mode64
);
2488 ppHRegMIPS(i
->Min
.Msa2R
.ws
, mode64
);
2492 vex_printf("%s ", showMsaVecOp(i
->Min
.MsaVec
.op
));
2493 ppHRegMIPS(i
->Min
.MsaVec
.wd
, mode64
);
2495 ppHRegMIPS(i
->Min
.MsaVec
.ws
, mode64
);
2497 ppHRegMIPS(i
->Min
.MsaVec
.wt
, mode64
);
2501 vex_printf("%s.%c ", showMsaBitOp(i
->Min
.MsaBit
.op
),
2502 showMsaDF(i
->Min
.MsaBit
.df
));
2503 ppHRegMIPS(i
->Min
.MsaBit
.wd
, mode64
);
2505 ppHRegMIPS(i
->Min
.MsaBit
.ws
, mode64
);
2506 vex_printf(", %d ", i
->Min
.MsaBit
.ms
);
2510 vex_printf("%s.%c ", showMsa3RFOp(i
->Min
.Msa3RF
.op
),
2511 showMsaDFF(i
->Min
.Msa3RF
.df
, i
->Min
.Msa3RF
.op
));
2512 ppHRegMIPS(i
->Min
.Msa3RF
.wd
, mode64
);
2514 ppHRegMIPS(i
->Min
.Msa3RF
.ws
, mode64
);
2516 ppHRegMIPS(i
->Min
.Msa3RF
.wt
, mode64
);
2520 vex_printf("%s.%c ", showMsa2RFOp(i
->Min
.Msa2RF
.op
),
2521 showMsaDFF(i
->Min
.Msa2RF
.df
, i
->Min
.Msa2RF
.op
));
2522 ppHRegMIPS(i
->Min
.Msa2RF
.wd
, mode64
);
2524 ppHRegMIPS(i
->Min
.Msa2RF
.ws
, mode64
);
2527 vpanic("ppMIPSInstr");
2532 /* --------- Helpers for register allocation. --------- */
2534 void getRegUsage_MIPSInstr(HRegUsage
* u
, const MIPSInstr
* i
, Bool mode64
)
2539 addHRegUse(u
, HRmWrite
, i
->Min
.LI
.dst
);
2542 addHRegUse(u
, HRmRead
, i
->Min
.Alu
.srcL
);
2543 addRegUsage_MIPSRH(u
, i
->Min
.Alu
.srcR
);
2544 addHRegUse(u
, HRmWrite
, i
->Min
.Alu
.dst
);
2546 /* or Rd,Rs,Rs == mr Rd,Rs */
2547 if ((i
->Min
.Alu
.op
== Malu_OR
)
2548 && (i
->Min
.Alu
.srcR
->tag
== Mrh_Reg
)
2549 && sameHReg(i
->Min
.Alu
.srcR
->Mrh
.Reg
.reg
, i
->Min
.Alu
.srcL
)) {
2550 u
->isRegRegMove
= True
;
2551 u
->regMoveSrc
= i
->Min
.Alu
.srcL
;
2552 u
->regMoveDst
= i
->Min
.Alu
.dst
;
2556 addHRegUse(u
, HRmRead
, i
->Min
.Shft
.srcL
);
2557 addRegUsage_MIPSRH(u
, i
->Min
.Shft
.srcR
);
2558 addHRegUse(u
, HRmWrite
, i
->Min
.Shft
.dst
);
2561 addHRegUse(u
, HRmRead
, i
->Min
.Rotx
.rt
);
2562 addHRegUse(u
, HRmWrite
, i
->Min
.Rotx
.rd
);
2565 addHRegUse(u
, HRmRead
, i
->Min
.Cmp
.srcL
);
2566 addHRegUse(u
, HRmRead
, i
->Min
.Cmp
.srcR
);
2567 addHRegUse(u
, HRmWrite
, i
->Min
.Cmp
.dst
);
2570 addHRegUse(u
, HRmRead
, i
->Min
.Unary
.src
);
2571 addHRegUse(u
, HRmWrite
, i
->Min
.Unary
.dst
);
2574 addHRegUse(u
, HRmWrite
, i
->Min
.Mul
.dst
);
2575 addHRegUse(u
, HRmRead
, i
->Min
.Mul
.srcL
);
2576 addHRegUse(u
, HRmRead
, i
->Min
.Mul
.srcR
);
2577 addHRegUse(u
, HRmWrite
, hregMIPS_HI(mode64
));
2578 addHRegUse(u
, HRmWrite
, hregMIPS_LO(mode64
));
2581 addHRegUse(u
, HRmRead
, i
->Min
.Mult
.srcL
);
2582 addHRegUse(u
, HRmRead
, i
->Min
.Mult
.srcR
);
2583 addHRegUse(u
, HRmWrite
, hregMIPS_HI(mode64
));
2584 addHRegUse(u
, HRmWrite
, hregMIPS_LO(mode64
));
2587 addHRegUse(u
, HRmWrite
, i
->Min
.Ext
.dst
);
2588 addHRegUse(u
, HRmRead
, i
->Min
.Ext
.src
);
2591 addHRegUse(u
, HRmWrite
, i
->Min
.Mulr6
.dst
);
2592 addHRegUse(u
, HRmRead
, i
->Min
.Mulr6
.srcL
);
2593 addHRegUse(u
, HRmRead
, i
->Min
.Mulr6
.srcR
);
2597 addHRegUse(u
, HRmWrite
, hregMIPS_HI(mode64
));
2598 addHRegUse(u
, HRmWrite
, hregMIPS_LO(mode64
));
2599 addHRegUse(u
, HRmRead
, i
->Min
.MtHL
.src
);
2603 addHRegUse(u
, HRmRead
, hregMIPS_HI(mode64
));
2604 addHRegUse(u
, HRmRead
, hregMIPS_LO(mode64
));
2605 addHRegUse(u
, HRmWrite
, i
->Min
.MfHL
.dst
);
2608 addHRegUse(u
, HRmRead
, i
->Min
.MsaMi10
.rs
);
2610 switch (i
->Min
.MsaMi10
.op
) {
2612 addHRegUse(u
, HRmWrite
, i
->Min
.MsaMi10
.wd
);
2616 addHRegUse(u
, HRmRead
, i
->Min
.MsaMi10
.wd
);
2623 if (LIKELY(i
->Min
.MsaElm
.op
!= MSA_CFCMSA
))
2624 addHRegUse(u
, HRmRead
, i
->Min
.MsaElm
.ws
);
2626 switch (i
->Min
.MsaElm
.op
) {
2631 addHRegUse(u
, HRmWrite
, i
->Min
.MsaElm
.wd
);
2637 addHRegUse(u
, HRmModify
, i
->Min
.MsaElm
.wd
);
2646 addHRegUse(u
, HRmRead
, i
->Min
.Msa3R
.ws
);
2647 addHRegUse(u
, HRmRead
, i
->Min
.Msa3R
.wt
);
2649 if (i
->Min
.Msa3R
.op
== MSA_SLD
||
2650 i
->Min
.Msa3R
.op
== MSA_VSHF
) {
2651 addHRegUse(u
, HRmModify
, i
->Min
.Msa3R
.wd
);
2653 addHRegUse(u
, HRmWrite
, i
->Min
.Msa3R
.wd
);
2659 addHRegUse(u
, HRmWrite
, i
->Min
.Msa2R
.wd
);
2660 addHRegUse(u
, HRmRead
, i
->Min
.Msa2R
.ws
);
2664 addHRegUse(u
, HRmRead
, i
->Min
.MsaVec
.ws
);
2665 addHRegUse(u
, HRmRead
, i
->Min
.MsaVec
.wt
);
2666 addHRegUse(u
, HRmWrite
, i
->Min
.MsaVec
.wd
);
2670 addHRegUse(u
, HRmRead
, i
->Min
.MsaBit
.ws
);
2671 addHRegUse(u
, HRmWrite
, i
->Min
.MsaBit
.wd
);
2675 addHRegUse(u
, HRmRead
, i
->Min
.Msa3RF
.ws
);
2676 addHRegUse(u
, HRmRead
, i
->Min
.Msa3RF
.wt
);
2677 addHRegUse(u
, HRmWrite
, i
->Min
.Msa3RF
.wd
);
2681 addHRegUse(u
, HRmRead
, i
->Min
.Msa2RF
.ws
);
2682 addHRegUse(u
, HRmWrite
, i
->Min
.Msa2RF
.wd
);
2686 addHRegUse(u
, HRmRead
, i
->Min
.MtFCSR
.src
);
2689 addHRegUse(u
, HRmWrite
, i
->Min
.MfFCSR
.dst
);
2692 addHRegUse(u
, HRmModify
, hregMIPS_HI(mode64
));
2693 addHRegUse(u
, HRmModify
, hregMIPS_LO(mode64
));
2694 addHRegUse(u
, HRmRead
, i
->Min
.Macc
.srcL
);
2695 addHRegUse(u
, HRmRead
, i
->Min
.Macc
.srcR
);
2698 addHRegUse(u
, HRmWrite
, hregMIPS_HI(mode64
));
2699 addHRegUse(u
, HRmWrite
, hregMIPS_LO(mode64
));
2700 addHRegUse(u
, HRmRead
, i
->Min
.Div
.srcL
);
2701 addHRegUse(u
, HRmRead
, i
->Min
.Div
.srcR
);
2704 addHRegUse(u
, HRmWrite
, i
->Min
.Divr6
.dst
);
2705 addHRegUse(u
, HRmRead
, i
->Min
.Divr6
.srcL
);
2706 addHRegUse(u
, HRmRead
, i
->Min
.Divr6
.srcR
);
2709 /* Logic and comments copied/modified from x86, ppc and arm back end.
2710 First off, claim it trashes all the caller-saved regs
2711 which fall within the register allocator's jurisdiction. */
2712 if (i
->Min
.Call
.cond
!= MIPScc_AL
)
2713 addHRegUse(u
, HRmRead
, i
->Min
.Call
.src
);
2715 addHRegUse(u
, HRmWrite
, hregMIPS_GPR1(mode64
));
2717 addHRegUse(u
, HRmWrite
, hregMIPS_GPR2(mode64
));
2718 addHRegUse(u
, HRmWrite
, hregMIPS_GPR3(mode64
));
2720 addHRegUse(u
, HRmWrite
, hregMIPS_GPR4(mode64
));
2721 addHRegUse(u
, HRmWrite
, hregMIPS_GPR5(mode64
));
2722 addHRegUse(u
, HRmWrite
, hregMIPS_GPR6(mode64
));
2723 addHRegUse(u
, HRmWrite
, hregMIPS_GPR7(mode64
));
2725 addHRegUse(u
, HRmWrite
, hregMIPS_GPR8(mode64
));
2726 addHRegUse(u
, HRmWrite
, hregMIPS_GPR9(mode64
));
2727 addHRegUse(u
, HRmWrite
, hregMIPS_GPR10(mode64
));
2728 addHRegUse(u
, HRmWrite
, hregMIPS_GPR11(mode64
));
2729 addHRegUse(u
, HRmWrite
, hregMIPS_GPR12(mode64
));
2730 addHRegUse(u
, HRmWrite
, hregMIPS_GPR13(mode64
));
2731 addHRegUse(u
, HRmWrite
, hregMIPS_GPR14(mode64
));
2732 addHRegUse(u
, HRmWrite
, hregMIPS_GPR15(mode64
));
2734 addHRegUse(u
, HRmWrite
, hregMIPS_GPR24(mode64
));
2735 addHRegUse(u
, HRmWrite
, hregMIPS_GPR25(mode64
));
2736 addHRegUse(u
, HRmWrite
, hregMIPS_GPR31(mode64
));
2738 /* Now we have to state any parameter-carrying registers
2739 which might be read. This depends on the argiregs field. */
2740 argir
= i
->Min
.Call
.argiregs
;
2741 if (argir
& (1<<11)) addHRegUse(u
, HRmRead
, hregMIPS_GPR11(mode64
));
2742 if (argir
& (1<<10)) addHRegUse(u
, HRmRead
, hregMIPS_GPR10(mode64
));
2743 if (argir
& (1<<9)) addHRegUse(u
, HRmRead
, hregMIPS_GPR9(mode64
));
2744 if (argir
& (1<<8)) addHRegUse(u
, HRmRead
, hregMIPS_GPR8(mode64
));
2745 if (argir
& (1<<7)) addHRegUse(u
, HRmRead
, hregMIPS_GPR7(mode64
));
2746 if (argir
& (1<<6)) addHRegUse(u
, HRmRead
, hregMIPS_GPR6(mode64
));
2747 if (argir
& (1<<5)) addHRegUse(u
, HRmRead
, hregMIPS_GPR5(mode64
));
2748 if (argir
& (1<<4)) addHRegUse(u
, HRmRead
, hregMIPS_GPR4(mode64
));
2750 vassert(0 == (argir
& ~((1 << 4) | (1 << 5) | (1 << 6)
2751 | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
2756 /* XDirect/XIndir/XAssisted are also a bit subtle. They
2757 conditionally exit the block. Hence we only need to list (1)
2758 the registers that they read, and (2) the registers that they
2759 write in the case where the block is not exited. (2) is
2760 empty, hence only (1) is relevant here. */
2762 addRegUsage_MIPSAMode(u
, i
->Min
.XDirect
.amPC
);
2765 addHRegUse(u
, HRmRead
, i
->Min
.XIndir
.dstGA
);
2766 addRegUsage_MIPSAMode(u
, i
->Min
.XIndir
.amPC
);
2769 addHRegUse(u
, HRmRead
, i
->Min
.XAssisted
.dstGA
);
2770 addRegUsage_MIPSAMode(u
, i
->Min
.XAssisted
.amPC
);
2773 addRegUsage_MIPSAMode(u
, i
->Min
.Load
.src
);
2774 addHRegUse(u
, HRmWrite
, i
->Min
.Load
.dst
);
2777 addHRegUse(u
, HRmRead
, i
->Min
.Store
.src
);
2778 addRegUsage_MIPSAMode(u
, i
->Min
.Store
.dst
);
2781 addRegUsage_MIPSAMode(u
, i
->Min
.LoadL
.src
);
2782 addHRegUse(u
, HRmWrite
, i
->Min
.LoadL
.dst
);
2785 addHRegUse(u
, HRmWrite
, i
->Min
.Cas
.old
);
2786 addHRegUse(u
, HRmRead
, i
->Min
.Cas
.addr
);
2787 addHRegUse(u
, HRmRead
, i
->Min
.Cas
.expd
);
2788 addHRegUse(u
, HRmModify
, i
->Min
.Cas
.data
);
2791 addHRegUse(u
, HRmWrite
, i
->Min
.StoreC
.src
);
2792 addHRegUse(u
, HRmRead
, i
->Min
.StoreC
.src
);
2793 addRegUsage_MIPSAMode(u
, i
->Min
.StoreC
.dst
);
2796 addHRegUse(u
, (i
->Min
.RdWrLR
.wrLR
? HRmRead
: HRmWrite
),
2800 if (i
->Min
.FpLdSt
.sz
== 4) {
2801 addHRegUse(u
, (i
->Min
.FpLdSt
.isLoad
? HRmWrite
: HRmRead
),
2803 addRegUsage_MIPSAMode(u
, i
->Min
.FpLdSt
.addr
);
2805 } else if (i
->Min
.FpLdSt
.sz
== 8) {
2806 addHRegUse(u
, (i
->Min
.FpLdSt
.isLoad
? HRmWrite
: HRmRead
),
2808 addRegUsage_MIPSAMode(u
, i
->Min
.FpLdSt
.addr
);
2813 addHRegUse(u
, HRmWrite
, i
->Min
.FpUnary
.dst
);
2814 addHRegUse(u
, HRmRead
, i
->Min
.FpUnary
.src
);
2817 addHRegUse(u
, HRmWrite
, i
->Min
.FpBinary
.dst
);
2818 addHRegUse(u
, HRmRead
, i
->Min
.FpBinary
.srcL
);
2819 addHRegUse(u
, HRmRead
, i
->Min
.FpBinary
.srcR
);
2822 addHRegUse(u
, HRmWrite
, i
->Min
.FpTernary
.dst
);
2823 addHRegUse(u
, HRmRead
, i
->Min
.FpTernary
.src1
);
2824 addHRegUse(u
, HRmRead
, i
->Min
.FpTernary
.src2
);
2825 addHRegUse(u
, HRmRead
, i
->Min
.FpTernary
.src3
);
2828 addHRegUse(u
, HRmWrite
, i
->Min
.FpConvert
.dst
);
2829 addHRegUse(u
, HRmRead
, i
->Min
.FpConvert
.src
);
2832 addHRegUse(u
, HRmWrite
, i
->Min
.FpCompare
.dst
);
2833 addHRegUse(u
, HRmRead
, i
->Min
.FpCompare
.srcL
);
2834 addHRegUse(u
, HRmRead
, i
->Min
.FpCompare
.srcR
);
2837 addHRegUse(u
, HRmWrite
, i
->Min
.FpMinMax
.dst
);
2838 addHRegUse(u
, HRmRead
, i
->Min
.FpMinMax
.srcL
);
2839 addHRegUse(u
, HRmRead
, i
->Min
.FpMinMax
.srcR
);
2842 addHRegUse(u
, HRmWrite
, i
->Min
.FpGpMove
.dst
);
2843 addHRegUse(u
, HRmRead
, i
->Min
.FpGpMove
.src
);
2846 addHRegUse(u
, HRmWrite
, i
->Min
.MoveCond
.dst
);
2847 addHRegUse(u
, HRmRead
, i
->Min
.MoveCond
.src
);
2848 addHRegUse(u
, HRmRead
, i
->Min
.MoveCond
.cond
);
2851 /* We expect both amodes only to mention %ebp, so this is in
2852 fact pointless, since %ebp isn't allocatable, but anyway.. */
2853 addRegUsage_MIPSAMode(u
, i
->Min
.EvCheck
.amCounter
);
2854 addRegUsage_MIPSAMode(u
, i
->Min
.EvCheck
.amFailAddr
);
2857 /* does not use any registers. */
2860 ppMIPSInstr(i
, mode64
);
2861 vpanic("getRegUsage_MIPSInstr");
2867 static void mapReg(HRegRemap
* m
, HReg
* r
)
2869 *r
= lookupHRegRemap(m
, *r
);
2872 void mapRegs_MIPSInstr(HRegRemap
* m
, MIPSInstr
* i
, Bool mode64
)
2876 mapReg(m
, &i
->Min
.LI
.dst
);
2879 mapReg(m
, &i
->Min
.Alu
.srcL
);
2880 mapRegs_MIPSRH(m
, i
->Min
.Alu
.srcR
);
2881 mapReg(m
, &i
->Min
.Alu
.dst
);
2884 mapReg(m
, &i
->Min
.Shft
.srcL
);
2885 mapRegs_MIPSRH(m
, i
->Min
.Shft
.srcR
);
2886 mapReg(m
, &i
->Min
.Shft
.dst
);
2889 mapReg(m
, &i
->Min
.Rotx
.rt
);
2890 mapReg(m
, &i
->Min
.Rotx
.rd
);
2893 mapReg(m
, &i
->Min
.Cmp
.srcL
);
2894 mapReg(m
, &i
->Min
.Cmp
.srcR
);
2895 mapReg(m
, &i
->Min
.Cmp
.dst
);
2898 mapReg(m
, &i
->Min
.Unary
.src
);
2899 mapReg(m
, &i
->Min
.Unary
.dst
);
2902 mapReg(m
, &i
->Min
.Mul
.dst
);
2903 mapReg(m
, &i
->Min
.Mul
.srcL
);
2904 mapReg(m
, &i
->Min
.Mul
.srcR
);
2907 mapReg(m
, &i
->Min
.Mult
.srcL
);
2908 mapReg(m
, &i
->Min
.Mult
.srcR
);
2911 mapReg(m
, &i
->Min
.Ext
.src
);
2912 mapReg(m
, &i
->Min
.Ext
.dst
);
2915 mapReg(m
, &i
->Min
.Mulr6
.dst
);
2916 mapReg(m
, &i
->Min
.Mulr6
.srcL
);
2917 mapReg(m
, &i
->Min
.Mulr6
.srcR
);
2921 mapReg(m
, &i
->Min
.MtHL
.src
);
2925 mapReg(m
, &i
->Min
.MfHL
.dst
);
2928 mapReg(m
, &i
->Min
.Macc
.srcL
);
2929 mapReg(m
, &i
->Min
.Macc
.srcR
);
2932 mapReg(m
, &i
->Min
.Div
.srcL
);
2933 mapReg(m
, &i
->Min
.Div
.srcR
);
2937 mapReg(m
, &i
->Min
.Divr6
.dst
);
2938 mapReg(m
, &i
->Min
.Divr6
.srcL
);
2939 mapReg(m
, &i
->Min
.Divr6
.srcR
);
2943 if (i
->Min
.Call
.cond
!= MIPScc_AL
)
2944 mapReg(m
, &i
->Min
.Call
.src
);
2948 mapReg(m
, &i
->Min
.MsaMi10
.rs
);
2949 mapReg(m
, &i
->Min
.MsaMi10
.wd
);
2953 mapReg(m
, &i
->Min
.MsaElm
.ws
);
2954 mapReg(m
, &i
->Min
.MsaElm
.wd
);
2958 mapReg(m
, &i
->Min
.Msa2R
.wd
);
2959 mapReg(m
, &i
->Min
.Msa2R
.ws
);
2963 mapReg(m
, &i
->Min
.Msa3R
.wt
);
2964 mapReg(m
, &i
->Min
.Msa3R
.ws
);
2965 mapReg(m
, &i
->Min
.Msa3R
.wd
);
2969 mapReg(m
, &i
->Min
.MsaVec
.wt
);
2970 mapReg(m
, &i
->Min
.MsaVec
.ws
);
2971 mapReg(m
, &i
->Min
.MsaVec
.wd
);
2975 mapReg(m
, &i
->Min
.MsaBit
.ws
);
2976 mapReg(m
, &i
->Min
.MsaBit
.wd
);
2980 mapReg(m
, &i
->Min
.Msa3RF
.wt
);
2981 mapReg(m
, &i
->Min
.Msa3RF
.ws
);
2982 mapReg(m
, &i
->Min
.Msa3RF
.wd
);
2986 mapReg(m
, &i
->Min
.Msa2RF
.ws
);
2987 mapReg(m
, &i
->Min
.Msa2RF
.wd
);
2991 mapRegs_MIPSAMode(m
, i
->Min
.XDirect
.amPC
);
2994 mapReg(m
, &i
->Min
.XIndir
.dstGA
);
2995 mapRegs_MIPSAMode(m
, i
->Min
.XIndir
.amPC
);
2998 mapReg(m
, &i
->Min
.XAssisted
.dstGA
);
2999 mapRegs_MIPSAMode(m
, i
->Min
.XAssisted
.amPC
);
3002 mapRegs_MIPSAMode(m
, i
->Min
.Load
.src
);
3003 mapReg(m
, &i
->Min
.Load
.dst
);
3006 mapReg(m
, &i
->Min
.Store
.src
);
3007 mapRegs_MIPSAMode(m
, i
->Min
.Store
.dst
);
3010 mapRegs_MIPSAMode(m
, i
->Min
.LoadL
.src
);
3011 mapReg(m
, &i
->Min
.LoadL
.dst
);
3014 mapReg(m
, &i
->Min
.Cas
.old
);
3015 mapReg(m
, &i
->Min
.Cas
.addr
);
3016 mapReg(m
, &i
->Min
.Cas
.expd
);
3017 mapReg(m
, &i
->Min
.Cas
.data
);
3020 mapReg(m
, &i
->Min
.StoreC
.src
);
3021 mapRegs_MIPSAMode(m
, i
->Min
.StoreC
.dst
);
3024 mapReg(m
, &i
->Min
.RdWrLR
.gpr
);
3027 if (i
->Min
.FpLdSt
.sz
== 4) {
3028 mapReg(m
, &i
->Min
.FpLdSt
.reg
);
3029 mapRegs_MIPSAMode(m
, i
->Min
.FpLdSt
.addr
);
3031 } else if (i
->Min
.FpLdSt
.sz
== 8) {
3032 mapReg(m
, &i
->Min
.FpLdSt
.reg
);
3033 mapRegs_MIPSAMode(m
, i
->Min
.FpLdSt
.addr
);
3038 mapReg(m
, &i
->Min
.FpUnary
.dst
);
3039 mapReg(m
, &i
->Min
.FpUnary
.src
);
3042 mapReg(m
, &i
->Min
.FpBinary
.dst
);
3043 mapReg(m
, &i
->Min
.FpBinary
.srcL
);
3044 mapReg(m
, &i
->Min
.FpBinary
.srcR
);
3047 mapReg(m
, &i
->Min
.FpTernary
.dst
);
3048 mapReg(m
, &i
->Min
.FpTernary
.src1
);
3049 mapReg(m
, &i
->Min
.FpTernary
.src2
);
3050 mapReg(m
, &i
->Min
.FpTernary
.src3
);
3053 mapReg(m
, &i
->Min
.FpConvert
.dst
);
3054 mapReg(m
, &i
->Min
.FpConvert
.src
);
3057 mapReg(m
, &i
->Min
.FpCompare
.dst
);
3058 mapReg(m
, &i
->Min
.FpCompare
.srcL
);
3059 mapReg(m
, &i
->Min
.FpCompare
.srcR
);
3062 mapReg(m
, &i
->Min
.FpMinMax
.dst
);
3063 mapReg(m
, &i
->Min
.FpMinMax
.srcL
);
3064 mapReg(m
, &i
->Min
.FpMinMax
.srcR
);
3067 mapReg(m
, &i
->Min
.MtFCSR
.src
);
3070 mapReg(m
, &i
->Min
.MfFCSR
.dst
);
3073 mapReg(m
, &i
->Min
.FpGpMove
.dst
);
3074 mapReg(m
, &i
->Min
.FpGpMove
.src
);
3077 mapReg(m
, &i
->Min
.MoveCond
.dst
);
3078 mapReg(m
, &i
->Min
.MoveCond
.src
);
3079 mapReg(m
, &i
->Min
.MoveCond
.cond
);
3082 /* We expect both amodes only to mention %ebp, so this is in
3083 fact pointless, since %ebp isn't allocatable, but anyway.. */
3084 mapRegs_MIPSAMode(m
, i
->Min
.EvCheck
.amCounter
);
3085 mapRegs_MIPSAMode(m
, i
->Min
.EvCheck
.amFailAddr
);
3088 /* does not use any registers. */
3091 ppMIPSInstr(i
, mode64
);
3092 vpanic("mapRegs_MIPSInstr");
3098 /* Generate mips spill/reload instructions under the direction of the
3099 register allocator. */
3100 void genSpill_MIPS( /*OUT*/ HInstr
** i1
, /*OUT*/ HInstr
** i2
, HReg rreg
,
3101 Int offsetB
, Bool mode64
)
3104 vassert(offsetB
>= 0);
3105 vassert(!hregIsVirtual(rreg
));
3107 am
= MIPSAMode_IR(offsetB
, GuestStatePointer(mode64
));
3109 switch (hregClass(rreg
)) {
3112 *i1
= MIPSInstr_Store(8, am
, rreg
, mode64
);
3116 *i1
= MIPSInstr_Store(4, am
, rreg
, mode64
);
3120 *i1
= MIPSInstr_FpLdSt(False
/*Store */ , 4, rreg
, am
);
3123 *i1
= MIPSInstr_FpLdSt(False
/*Store */ , 8, rreg
, am
);
3126 *i1
= MIPSInstr_MsaMi10(MSA_ST
, (offsetB
>>3),
3127 GuestStatePointer(mode64
), rreg
, MSA_D
);
3130 ppHRegClass(hregClass(rreg
));
3131 vpanic("genSpill_MIPS: unimplemented regclass");
3136 void genReload_MIPS( /*OUT*/ HInstr
** i1
, /*OUT*/ HInstr
** i2
, HReg rreg
,
3137 Int offsetB
, Bool mode64
)
3140 vassert(!hregIsVirtual(rreg
));
3141 am
= MIPSAMode_IR(offsetB
, GuestStatePointer(mode64
));
3143 switch (hregClass(rreg
)) {
3146 *i1
= MIPSInstr_Load(8, rreg
, am
, mode64
);
3150 *i1
= MIPSInstr_Load(4, rreg
, am
, mode64
);
3154 *i1
= MIPSInstr_FpLdSt(True
/*Load */ , 8, rreg
, am
);
3156 *i1
= MIPSInstr_FpLdSt(True
/*Load */ , 4, rreg
, am
);
3159 *i1
= MIPSInstr_FpLdSt(True
/*Load */ , 8, rreg
, am
);
3162 *i1
= MIPSInstr_MsaMi10(MSA_LD
, (offsetB
>>3),
3163 GuestStatePointer(mode64
), rreg
, MSA_D
);
3166 ppHRegClass(hregClass(rreg
));
3167 vpanic("genReload_MIPS: unimplemented regclass");
3172 MIPSInstr
* genMove_MIPS(HReg from
, HReg to
, Bool mode64
)
3174 switch (hregClass(from
)) {
3177 return MIPSInstr_Alu(Malu_OR
, to
, from
, MIPSRH_Reg(from
));
3179 ppHRegClass(hregClass(from
));
3180 vpanic("genMove_MIPS: unimplemented regclass");
3184 /* --------- The mips assembler --------- */
3186 inline static UInt
iregNo(HReg r
, Bool mode64
)
3189 vassert(hregClass(r
) == (mode64
? HRcInt64
: HRcInt32
));
3190 vassert(!hregIsVirtual(r
));
3191 n
= hregEncoding(r
);
3196 inline static UInt
fregNo(HReg r
, Bool mode64
)
3199 vassert(!hregIsVirtual(r
));
3200 n
= hregEncoding(r
);
3205 inline static UInt
dregNo(HReg r
)
3208 vassert(!hregIsVirtual(r
));
3209 n
= hregEncoding(r
);
3214 inline static UInt
qregEnc ( HReg r
)
3217 vassert(!hregIsVirtual(r
));
3218 n
= hregEncoding(r
);
3223 /* Emit 32bit instruction */
3224 static UChar
*emit32(UChar
* p
, UInt w32
)
3226 #if defined (_MIPSEL)
3227 *p
++ = toUChar(w32
& 0x000000FF);
3228 *p
++ = toUChar((w32
>> 8) & 0x000000FF);
3229 *p
++ = toUChar((w32
>> 16) & 0x000000FF);
3230 *p
++ = toUChar((w32
>> 24) & 0x000000FF);
3232 MIPS endianess is decided at compile time using gcc defined
3233 symbols _MIPSEL or _MIPSEB. When compiling libvex in a cross-arch
3234 setup, then none of these is defined. We just choose here by default
3235 mips Big Endian to allow libvexmultiarch_test to work when using
3236 a mips host architecture.
3237 A cleaner way would be to either have mips using 'dynamic endness'
3238 (like ppc64be or le, decided at runtime) or at least defining
3239 by default _MIPSEB when compiling on a non mips system.
3240 #elif defined (_MIPSEB).
3243 *p
++ = toUChar((w32
>> 24) & 0x000000FF);
3244 *p
++ = toUChar((w32
>> 16) & 0x000000FF);
3245 *p
++ = toUChar((w32
>> 8) & 0x000000FF);
3246 *p
++ = toUChar(w32
& 0x000000FF);
3250 /* Fetch an instruction */
3251 static UInt
fetch32 ( UChar
* p
)
3254 #if defined (_MIPSEL)
3255 w32
|= ((0xFF & (UInt
)p
[0]) << 0);
3256 w32
|= ((0xFF & (UInt
)p
[1]) << 8);
3257 w32
|= ((0xFF & (UInt
)p
[2]) << 16);
3258 w32
|= ((0xFF & (UInt
)p
[3]) << 24);
3259 #elif defined (_MIPSEB)
3260 w32
|= ((0xFF & (UInt
)p
[0]) << 24);
3261 w32
|= ((0xFF & (UInt
)p
[1]) << 16);
3262 w32
|= ((0xFF & (UInt
)p
[2]) << 8);
3263 w32
|= ((0xFF & (UInt
)p
[3]) << 0);
3268 /* physical structure of mips instructions */
3269 /* type I : opcode - 6 bits
3274 static UChar
*mkFormI(UChar
* p
, UInt opc
, UInt rs
, UInt rt
, UInt imm
)
3277 vassert(opc
< 0x40);
3281 theInstr
= ((opc
<< 26) | (rs
<< 21) | (rt
<< 16) | (imm
));
3282 return emit32(p
, theInstr
);
3285 /* type R: opcode - 6 bits
3292 static UChar
*mkFormR(UChar
* p
, UInt opc
, UInt rs
, UInt rt
, UInt rd
, UInt sa
,
3296 vex_printf("rs = %u\n", rs
);
3298 vassert(opc
< 0x40);
3303 func
= func
& 0xFFFF;
3304 theInstr
= ((opc
<< 26) | (rs
<< 21) | (rt
<< 16) | (rd
<< 11) | (sa
<< 6) |
3307 return emit32(p
, theInstr
);
3310 static UChar
*mkFormS(UChar
* p
, UInt opc1
, UInt rRD
, UInt rRS
, UInt rRT
,
3314 vassert(opc1
<= 0x3F);
3315 vassert(rRD
< 0x20);
3316 vassert(rRS
< 0x20);
3317 vassert(rRT
< 0x20);
3318 vassert(opc2
<= 0x3F);
3319 vassert(sa
<= 0x3F);
3321 theInstr
= ((opc1
<< 26) | (rRS
<< 21) | (rRT
<< 16) | (rRD
<< 11) |
3322 ((sa
& 0x1F) << 6) | (opc2
));
3324 return emit32(p
, theInstr
);
3327 static UChar
*mkFormMI10(UChar
* p
, UInt msa
, UInt s10
, UInt rRS
, UInt rWD
,
3328 UInt opc
, UInt rDF
) {
3330 vassert(rDF
< 0x04);
3331 vassert(opc
< 0x10);
3332 vassert(rWD
< 0x20);
3333 vassert(rRS
< 0x20);
3334 vassert(s10
< 0x400);
3335 vassert(msa
< 0x40);
3336 theInstr
= ((msa
<< 26) | (s10
<< 16) | (rRS
<< 11) | (rWD
<< 6) |
3337 ((opc
<< 2) | rDF
));
3338 return emit32(p
, theInstr
);
3341 static UChar
*mkFormELM(UChar
*p
, UInt msa
, UInt op
, UInt df
, UInt ws
, UInt wd
,
3344 vassert(msa
< 0x40);
3347 vassert(opc
< 0x40);
3348 theInstr
= ((msa
<< 26) | (op
<< 22) | (df
<< 16) | (ws
<< 11) |
3350 return emit32(p
, theInstr
);
3353 static UChar
*mkForm2R(UChar
*p
, UInt msa
, UInt op
, UInt df
, UInt ws
, UInt wd
,
3356 theInstr
= ((msa
<< 26) | (op
<< 18) | (df
<< 16) | (ws
<< 11) |
3358 return emit32(p
, theInstr
);
3361 static UChar
*mkForm3R(UChar
*p
, UInt op
, UInt df
, UInt wd
, UInt ws
, UInt wt
) {
3363 vassert(op
< 0x3800040);
3368 theInstr
= OPC_MSA
| op
| (df
<< 21) | (wt
<< 16) | (ws
<< 11) |
3370 return emit32(p
, theInstr
);
3373 static UChar
*mkFormVEC(UChar
*p
, UInt op
, UInt wt
, UInt ws
, UInt wd
) {
3379 theInstr
= OPC_MSA
| (op
<< 21) | (wt
<< 16) | (ws
<< 11) |
3381 return emit32(p
, theInstr
);
3384 static UChar
*mkFormBIT(UChar
*p
, UInt op
, UInt df
, UInt ms
, UInt ws
, UInt wd
) {
3387 vassert(op
< 0x3800040);
3389 vassert(ms
< 0x100);
3405 theInstr
= OPC_MSA
| op
| (dfm
<< 16) | (ws
<< 11) |
3407 return emit32(p
, theInstr
);
3410 static UChar
*mkForm3RF(UChar
*p
, UInt op
, UInt df
, UInt wd
, UInt ws
, UInt wt
) {
3412 vassert(op
< 0x3C0001D);
3417 theInstr
= OPC_MSA
| op
| (df
<< 21) | (wt
<< 16) | (ws
<< 11) |
3419 return emit32(p
, theInstr
);
3422 static UChar
*mkForm2RF(UChar
*p
, UInt op
, UInt df
, UInt ws
, UInt wd
,
3425 theInstr
= OPC_MSA
| (op
<< 17) | (df
<< 16) | (ws
<< 11) | (wd
<< 6) | opc
;
3426 return emit32(p
, theInstr
);
3429 static UChar
*doAMode_IR(UChar
* p
, UInt opc1
, UInt rSD
, MIPSAMode
* am
,
3432 UInt rA
, idx
, r_dst
;
3433 vassert(am
->tag
== Mam_IR
);
3434 vassert(am
->Mam
.IR
.index
< 0x10000);
3436 rA
= iregNo(am
->Mam
.IR
.base
, mode64
);
3437 idx
= am
->Mam
.IR
.index
;
3439 if (rSD
== 33 || rSD
== 34)
3448 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 16);
3451 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 18);
3454 p
= mkFormI(p
, opc1
, rA
, r_dst
, idx
);
3460 p
= mkFormR(p
, 0, r_dst
, 0, 0, 0, 17);
3463 p
= mkFormR(p
, 0, r_dst
, 0, 0, 0, 19);
3469 static UChar
*doAMode_RR(UChar
* p
, UInt opc1
, UInt rSD
, MIPSAMode
* am
,
3473 vassert(am
->tag
== Mam_RR
);
3475 rA
= iregNo(am
->Mam
.RR
.base
, mode64
);
3476 rB
= iregNo(am
->Mam
.RR
.index
, mode64
);
3478 if (rSD
== 33 || rSD
== 34)
3487 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 16);
3490 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 18);
3494 /* daddu rA, rA, rB$
3497 p
= mkFormR(p
, 0, rA
, rB
, rA
, 0, 45);
3498 p
= mkFormI(p
, opc1
, rA
, r_dst
, 0);
3499 p
= mkFormR(p
, 0, rA
, rB
, rA
, 0, 47);
3504 p
= mkFormR(p
, 0, rA
, rB
, rA
, 0, 33);
3505 p
= mkFormI(p
, opc1
, rA
, r_dst
, 0);
3506 p
= mkFormR(p
, 0, rA
, rB
, rA
, 0, 35);
3512 p
= mkFormR(p
, 0, r_dst
, 0, 0, 0, 17);
3515 p
= mkFormR(p
, 0, r_dst
, 0, 0, 0, 19);
3521 /* Load imm to r_dst */
3522 static UChar
*mkLoadImm(UChar
* p
, UInt r_dst
, ULong imm
, Bool mode64
)
3525 vassert(r_dst
< 0x20);
3526 UInt u32
= (UInt
) imm
;
3527 Int s32
= (Int
) u32
;
3528 Long s64
= (Long
) s32
;
3532 if (imm
>= 0xFFFFFFFFFFFF8000ULL
|| imm
< 0x8000) {
3533 /* sign-extendable from 16 bits
3534 addiu r_dst, 0, imm => li r_dst, imm */
3535 p
= mkFormI(p
, 9, 0, r_dst
, imm
& 0xFFFF);
3537 if (imm
>= 0xFFFFFFFF80000000ULL
|| imm
< 0x80000000ULL
) {
3538 /* sign-extendable from 32 bits
3539 addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
3540 lui r_dst, (imm >> 16) */
3541 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 16) & 0xFFFF);
3542 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3543 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3546 /* lui load in upper half of low word */
3547 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 48) & 0xFFFF);
3549 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 32) & 0xFFFF);
3551 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3553 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 16) & 0xFFFF);
3555 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3557 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3563 /* A simplified version of mkLoadImm that always generates 2 or 6
3564 instructions (32 or 64 bits respectively) even if it could generate
3565 fewer. This is needed for generating fixed sized patchable
3567 static UChar
* mkLoadImm_EXACTLY2or6 ( UChar
* p
,
3568 UInt r_dst
, ULong imm
, Bool mode64
)
3570 vassert(r_dst
< 0x20);
3573 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
3574 extension of the bottom 32 bits. (Probably unnecessary.) */
3575 UInt u32
= (UInt
)imm
;
3577 Long s64
= (Long
)s32
;
3582 /* sign-extendable from 32 bits
3583 addiu r_dst, r0, (imm >> 16) => lis r_dst, (imm >> 16)
3584 lui r_dst, (imm >> 16) */
3585 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 16) & 0xFFFF);
3586 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3587 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3589 /* full 64bit immediate load: 6 (six!) insns. */
3591 /* lui load in upper half of low word */
3592 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 48) & 0xFFFF);
3594 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 32) & 0xFFFF);
3596 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3598 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 16) & 0xFFFF);
3600 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3602 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3607 /* Checks whether the sequence of bytes at p was indeed created
3608 by mkLoadImm_EXACTLY2or6 with the given parameters. */
3609 static Bool
isLoadImm_EXACTLY2or6 ( UChar
* p_to_check
,
3610 UInt r_dst
, ULong imm
, Bool mode64
)
3612 vassert(r_dst
< 0x20);
3615 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
3616 extension of the bottom 32 bits. (Probably unnecessary.) */
3617 UInt u32
= (UInt
)imm
;
3619 Long s64
= (Long
)s32
;
3624 UInt expect
[2] = { 0, 0 };
3625 UChar
* p
= (UChar
*)&expect
[0];
3626 /* lui r_dst, (immi >> 16) */
3627 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 16) & 0xFFFF);
3628 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3629 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3630 vassert(p
== (UChar
*)&expect
[2]);
3632 ret
= fetch32(p_to_check
+ 0) == expect
[0]
3633 && fetch32(p_to_check
+ 4) == expect
[1];
3635 UInt expect
[6] = { 0, 0, 0, 0, 0, 0};
3636 UChar
* p
= (UChar
*)&expect
[0];
3637 /* lui load in upper half of low word */
3638 p
= mkFormI(p
, 15, 0, r_dst
, (imm
>> 48) & 0xFFFF);
3640 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 32) & 0xFFFF);
3642 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3644 p
= mkFormI(p
, 13, r_dst
, r_dst
, (imm
>> 16) & 0xFFFF);
3646 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 16, 56);
3648 p
= mkFormI(p
, 13, r_dst
, r_dst
, imm
& 0xFFFF);
3649 vassert(p
== (UChar
*)&expect
[6]);
3651 ret
= fetch32(p_to_check
+ 0) == expect
[0]
3652 && fetch32(p_to_check
+ 4) == expect
[1]
3653 && fetch32(p_to_check
+ 8) == expect
[2]
3654 && fetch32(p_to_check
+ 12) == expect
[3]
3655 && fetch32(p_to_check
+ 16) == expect
[4]
3656 && fetch32(p_to_check
+ 20) == expect
[5];
3661 /* Generate a machine-word sized load or store. Simplified version of
3662 the Min_Load and Min_Store cases below.
3663 This will generate 32-bit load/store on MIPS32, and 64-bit load/store on
3666 static UChar
* do_load_or_store_machine_word ( UChar
* p
, Bool isLoad
, UInt reg
,
3667 MIPSAMode
* am
, Bool mode64
)
3669 if (isLoad
) { /* load */
3673 vassert(0 == (am
->Mam
.IR
.index
& 3));
3675 p
= doAMode_IR(p
, mode64
? 55 : 35, reg
, am
, mode64
);
3678 /* we could handle this case, but we don't expect to ever
3686 } else /* store */ {
3690 vassert(0 == (am
->Mam
.IR
.index
& 3));
3692 p
= doAMode_IR(p
, mode64
? 63 : 43, reg
, am
, mode64
);
3695 /* we could handle this case, but we don't expect to ever
3707 /* Generate a 32-bit sized load or store. Simplified version of
3708 do_load_or_store_machine_word above. */
3709 static UChar
* do_load_or_store_word32 ( UChar
* p
, Bool isLoad
, UInt reg
,
3710 MIPSAMode
* am
, Bool mode64
)
3712 if (isLoad
) { /* load */
3716 vassert(0 == (am
->Mam
.IR
.index
& 3));
3718 p
= doAMode_IR(p
, 35, reg
, am
, mode64
);
3721 /* we could handle this case, but we don't expect to ever
3729 } else /* store */ {
3733 vassert(0 == (am
->Mam
.IR
.index
& 3));
3735 p
= doAMode_IR(p
, 43, reg
, am
, mode64
);
3738 /* we could handle this case, but we don't expect to ever
3750 /* Move r_dst to r_src */
3751 static UChar
*mkMoveReg(UChar
* p
, UInt r_dst
, UInt r_src
)
3753 vassert(r_dst
< 0x20);
3754 vassert(r_src
< 0x20);
3756 if (r_dst
!= r_src
) {
3757 /* or r_dst, r_src, r_src */
3758 p
= mkFormR(p
, 0, r_src
, r_src
, r_dst
, 0, 37);
3763 /* Emit an instruction into buf and return the number of bytes used.
3764 Note that buf is not the insn's final place, and therefore it is
3765 imperative to emit position-independent code. If the emitted
3766 instruction was a profiler inc, set *is_profInc to True, else
3767 leave it unchanged. */
3768 Int
emit_MIPSInstr ( /*MB_MOD*/Bool
* is_profInc
,
3769 UChar
* buf
, Int nbuf
, const MIPSInstr
* i
,
3771 VexEndness endness_host
,
3772 const void* disp_cp_chain_me_to_slowEP
,
3773 const void* disp_cp_chain_me_to_fastEP
,
3774 const void* disp_cp_xindir
,
3775 const void* disp_cp_xassisted
)
3779 vassert(nbuf
>= 32);
3784 p
= mkLoadImm(p
, iregNo(i
->Min
.LI
.dst
, mode64
), i
->Min
.LI
.imm
, mode64
);
3788 MIPSRH
*srcR
= i
->Min
.Alu
.srcR
;
3789 Bool immR
= toBool(srcR
->tag
== Mrh_Imm
);
3790 UInt r_dst
= iregNo(i
->Min
.Alu
.dst
, mode64
);
3791 UInt r_srcL
= iregNo(i
->Min
.Alu
.srcL
, mode64
);
3792 UInt r_srcR
= immR
? (-1) /*bogus */ : iregNo(srcR
->Mrh
.Reg
.reg
,
3794 switch (i
->Min
.Alu
.op
) {
3795 /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */
3798 vassert(srcR
->Mrh
.Imm
.syned
);
3800 p
= mkFormI(p
, 9, r_srcL
, r_dst
, srcR
->Mrh
.Imm
.imm16
);
3803 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 33);
3808 /* addiu , but with negated imm */
3809 vassert(srcR
->Mrh
.Imm
.syned
);
3810 vassert(srcR
->Mrh
.Imm
.imm16
!= 0x8000);
3811 p
= mkFormI(p
, 9, r_srcL
, r_dst
, (-srcR
->Mrh
.Imm
.imm16
));
3814 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 35);
3820 vassert(!srcR
->Mrh
.Imm
.syned
);
3821 p
= mkFormI(p
, 12, r_srcL
, r_dst
, srcR
->Mrh
.Imm
.imm16
);
3824 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 36);
3830 vassert(!srcR
->Mrh
.Imm
.syned
);
3831 p
= mkFormI(p
, 13, r_srcL
, r_dst
, srcR
->Mrh
.Imm
.imm16
);
3836 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 16);
3837 else if (r_srcL
== 34)
3839 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 18);
3840 else if (r_dst
== 33)
3842 p
= mkFormR(p
, 0, r_srcL
, 0, 0, 0, 17);
3843 else if (r_dst
== 34)
3845 p
= mkFormR(p
, 0, r_srcL
, 0, 0, 0, 19);
3847 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 37);
3853 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 39);
3858 vassert(!srcR
->Mrh
.Imm
.syned
);
3859 p
= mkFormI(p
, 14, r_srcL
, r_dst
, srcR
->Mrh
.Imm
.imm16
);
3862 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 38);
3867 vassert(srcR
->Mrh
.Imm
.syned
);
3868 vassert(srcR
->Mrh
.Imm
.imm16
!= 0x8000);
3869 p
= mkFormI(p
, 25, r_srcL
, r_dst
, srcR
->Mrh
.Imm
.imm16
);
3871 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 45);
3876 p
= mkFormI(p
, 25, r_srcL
, r_dst
, (-srcR
->Mrh
.Imm
.imm16
));
3878 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 47);
3885 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 42);
3896 UInt v_reg
= qregEnc(i
->Min
.MsaMi10
.wd
);
3897 UInt r_reg
= iregNo(i
->Min
.MsaMi10
.rs
, mode64
);
3898 p
= mkFormMI10(p
, 0x1E, i
->Min
.MsaMi10
.s10
, r_reg
, v_reg
, i
->Min
.MsaMi10
.op
,
3906 switch (i
->Min
.MsaElm
.op
) {
3908 v_src
= iregNo(i
->Min
.MsaElm
.ws
, mode64
);
3909 v_dst
= qregEnc(i
->Min
.MsaElm
.wd
);
3914 v_src
= qregEnc(i
->Min
.MsaElm
.ws
);
3915 v_dst
= iregNo(i
->Min
.MsaElm
.wd
, mode64
);
3919 v_src
= iregNo(i
->Min
.MsaElm
.ws
, mode64
);
3925 v_dst
= iregNo(i
->Min
.MsaElm
.wd
, mode64
);
3929 v_src
= qregEnc(i
->Min
.MsaElm
.ws
);
3930 v_dst
= qregEnc(i
->Min
.MsaElm
.wd
);
3934 switch (i
->Min
.MsaElm
.op
) {
3938 p
= mkFormELM(p
, 0x1E, 0, i
->Min
.MsaElm
.op
, v_src
, v_dst
, 25);
3942 p
= mkFormELM(p
, 0x1E, i
->Min
.MsaElm
.op
, i
->Min
.MsaElm
.dfn
, v_src
, v_dst
, 25);
3952 switch (i
->Min
.Msa3R
.op
) {
3955 v_wt
= iregNo(i
->Min
.Msa3R
.wt
, mode64
);
3959 v_wt
= qregEnc(i
->Min
.Msa3R
.wt
);
3963 UInt v_ws
= qregEnc(i
->Min
.Msa3R
.ws
);
3964 UInt v_wd
= qregEnc(i
->Min
.Msa3R
.wd
);;
3965 p
= mkForm3R(p
, i
->Min
.Msa3R
.op
, i
->Min
.Msa3R
.df
, v_wd
, v_ws
, v_wt
);
3973 switch (i
->Min
.Msa2R
.op
) {
3975 v_src
= iregNo(i
->Min
.Msa2R
.ws
, mode64
);
3976 v_dst
= qregEnc(i
->Min
.Msa2R
.wd
);
3980 v_src
= qregEnc(i
->Min
.Msa2R
.ws
);
3981 v_dst
= qregEnc(i
->Min
.Msa2R
.wd
);
3985 p
= mkForm2R(p
, 0x1E, i
->Min
.Msa2R
.op
, i
->Min
.Msa2R
.df
, v_src
, v_dst
, 0x1E);
3990 UInt v_src
= qregEnc(i
->Min
.Msa2RF
.ws
);
3991 UInt v_dst
= qregEnc(i
->Min
.Msa2RF
.wd
);
3992 p
= mkForm2RF(p
, i
->Min
.Msa2RF
.op
, i
->Min
.Msa2RF
.df
, v_src
, v_dst
, 0x1E);
3997 UInt v_wt
= qregEnc(i
->Min
.MsaVec
.wt
);
3998 UInt v_ws
= qregEnc(i
->Min
.MsaVec
.ws
);
3999 UInt v_wd
= qregEnc(i
->Min
.MsaVec
.wd
);
4000 p
= mkFormVEC(p
, i
->Min
.MsaVec
.op
, v_wt
, v_ws
, v_wd
);
4005 UInt v_ws
= qregEnc(i
->Min
.MsaBit
.ws
);
4006 UInt v_wd
= qregEnc(i
->Min
.MsaBit
.wd
);
4007 p
= mkFormBIT(p
, i
->Min
.MsaBit
.op
, i
->Min
.Msa3R
.df
, i
->Min
.MsaBit
.ms
, v_ws
,
4013 UInt v_wt
= qregEnc(i
->Min
.Msa3RF
.wt
);
4014 UInt v_ws
= qregEnc(i
->Min
.Msa3RF
.ws
);
4015 UInt v_wd
= qregEnc(i
->Min
.Msa3RF
.wd
);;
4016 p
= mkForm3RF(p
, i
->Min
.Msa3RF
.op
, i
->Min
.Msa3RF
.df
, v_wd
, v_ws
, v_wt
);
4021 MIPSRH
*srcR
= i
->Min
.Shft
.srcR
;
4022 Bool sz32
= i
->Min
.Shft
.sz32
;
4023 Bool immR
= toBool(srcR
->tag
== Mrh_Imm
);
4024 UInt r_dst
= iregNo(i
->Min
.Shft
.dst
, mode64
);
4025 UInt r_srcL
= iregNo(i
->Min
.Shft
.srcL
, mode64
);
4026 UInt r_srcR
= immR
? (-1) /*bogus */ : iregNo(srcR
->Mrh
.Reg
.reg
,
4030 switch (i
->Min
.Shft
.op
) {
4034 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4035 vassert(n
>= 0 && n
<= 32);
4036 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 0);
4038 /* shift variable */
4039 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 4);
4043 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4044 vassert((n
>= 0 && n
< 32) || (n
> 31 && n
< 64));
4045 if (n
>= 0 && n
< 32) {
4046 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 56);
4048 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
- 32, 60);
4051 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 20);
4060 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4061 vassert(n
>= 0 && n
< 32);
4062 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 2);
4064 /* shift variable */
4065 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 6);
4068 /* DSRL, DSRL32, DSRLV */
4070 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4071 vassert((n
>= 0 && n
< 32) || (n
> 31 && n
< 64));
4072 if (n
>= 0 && n
< 32) {
4073 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 58);
4075 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
- 32, 62);
4078 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 22);
4087 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4088 vassert(n
>= 0 && n
< 32);
4089 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 3);
4091 /* shift variable */
4092 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 7);
4095 /* DSRA, DSRA32, DSRAV */
4097 UInt n
= srcR
->Mrh
.Imm
.imm16
;
4098 vassert((n
>= 0 && n
< 32) || (n
> 31 && n
< 64));
4099 if (n
>= 0 && n
< 32) {
4100 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
, 59);
4102 p
= mkFormS(p
, 0, r_dst
, 0, r_srcL
, n
- 32, 63);
4105 p
= mkFormS(p
, 0, r_dst
, r_srcR
, r_srcL
, 0, 23);
4118 UInt r_dst
= iregNo(i
->Min
.Rotx
.rd
, mode64
);
4119 UInt r_src
= iregNo(i
->Min
.Rotx
.rt
, mode64
);
4120 switch(i
->Min
.Rotx
.op
) {
4122 p
= mkFormR(p
, 31, 0, r_src
, r_dst
, 0, 32);
4125 p
= mkFormR(p
, 31, 0, r_src
, r_dst
, 0, 36);
4131 UInt r_dst
= iregNo(i
->Min
.Unary
.dst
, mode64
);
4132 UInt r_src
= iregNo(i
->Min
.Unary
.src
, mode64
);
4134 switch (i
->Min
.Unary
.op
) {
4135 /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
4136 #if (__mips_isa_rev >= 6)
4137 case Mun_CLO
: /* clo */
4138 p
= mkFormR(p
, 0, r_src
, 0, r_dst
, 1, 17);
4140 case Mun_CLZ
: /* clz */
4141 p
= mkFormR(p
, 0, r_src
, 0, r_dst
, 1, 16);
4143 case Mun_DCLO
: /* clo */
4144 p
= mkFormR(p
, 0, r_src
, 0, r_dst
, 1, 19);
4146 case Mun_DCLZ
: /* clz */
4147 p
= mkFormR(p
, 0, r_src
, 0, r_dst
, 1, 18);
4150 case Mun_CLO
: /* clo */
4151 p
= mkFormR(p
, 28, r_src
, r_dst
, r_dst
, 0, 33);
4153 case Mun_CLZ
: /* clz */
4154 p
= mkFormR(p
, 28, r_src
, r_dst
, r_dst
, 0, 32);
4156 case Mun_DCLO
: /* clo */
4157 p
= mkFormR(p
, 28, r_src
, r_dst
, r_dst
, 0, 37);
4159 case Mun_DCLZ
: /* clz */
4160 p
= mkFormR(p
, 28, r_src
, r_dst
, r_dst
, 0, 36);
4163 case Mun_NOP
: /* nop (sll r0,r0,0) */
4164 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0);
4171 UInt r_srcL
= iregNo(i
->Min
.Cmp
.srcL
, mode64
);
4172 UInt r_srcR
= iregNo(i
->Min
.Cmp
.srcR
, mode64
);
4173 UInt r_dst
= iregNo(i
->Min
.Cmp
.dst
, mode64
);
4175 switch (i
->Min
.Cmp
.cond
) {
4177 /* xor r_dst, r_srcL, r_srcR
4178 sltiu r_dst, r_dst, 1 */
4179 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 38);
4180 p
= mkFormI(p
, 11, r_dst
, r_dst
, 1);
4183 /* xor r_dst, r_srcL, r_srcR
4184 sltu r_dst, zero, r_dst */
4185 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 38);
4186 p
= mkFormR(p
, 0, 0, r_dst
, r_dst
, 0, 43);
4189 /* slt r_dst, r_srcL, r_srcR */
4190 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 42);
4193 /* sltu r_dst, r_srcL, r_srcR */
4194 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, 0, 43);
4197 /* slt r_dst, r_srcR, r_srcL
4198 xori r_dst, r_dst, 1 */
4199 p
= mkFormR(p
, 0, r_srcR
, r_srcL
, r_dst
, 0, 42);
4200 p
= mkFormI(p
, 14, r_dst
, r_dst
, 1);
4203 /* sltu r_dst, rsrcR, r_srcL
4204 xori r_dsr, r_dst, 1 */
4205 p
= mkFormR(p
, 0, r_srcR
, r_srcL
, r_dst
, 0, 43);
4206 p
= mkFormI(p
, 14, r_dst
, r_dst
, 1);
4215 UInt r_srcL
= iregNo(i
->Min
.Mul
.srcL
, mode64
);
4216 UInt r_srcR
= iregNo(i
->Min
.Mul
.srcR
, mode64
);
4217 UInt r_dst
= iregNo(i
->Min
.Mul
.dst
, mode64
);
4218 /* mul r_dst, r_srcL, r_srcR */
4219 p
= mkFormR(p
, 28, r_srcL
, r_srcR
, r_dst
, 0, 2);
4224 Bool syned
= i
->Min
.Mult
.syned
;
4225 UInt r_srcL
= iregNo(i
->Min
.Mult
.srcL
, mode64
);
4226 UInt r_srcR
= iregNo(i
->Min
.Mult
.srcR
, mode64
);
4229 /* dmult r_srcL, r_srcR */
4230 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 28);
4232 /* dmultu r_srcL, r_srcR */
4233 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 29);
4236 /* mult r_srcL, r_srcR */
4237 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 24);
4239 /* multu r_srcL, r_srcR */
4240 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 25);
4246 UInt r_src
= iregNo(i
->Min
.Ext
.src
, mode64
);
4247 UInt r_dst
= iregNo(i
->Min
.Ext
.dst
, mode64
);
4248 /* For now, only DEXT is implemented. */
4250 vassert(i
->Min
.Ext
.pos
< 32);
4251 vassert(i
->Min
.Ext
.size
> 0);
4252 vassert(i
->Min
.Ext
.size
<= 32);
4253 vassert(i
->Min
.Ext
.size
+ i
->Min
.Ext
.pos
> 0);
4254 vassert(i
->Min
.Ext
.size
+ i
->Min
.Ext
.pos
<= 63);
4255 /* DEXT r_dst, r_src, pos, size */
4256 p
= mkFormR(p
, 0x1F, r_src
, r_dst
,
4257 i
->Min
.Ext
.size
- 1, i
->Min
.Ext
.pos
, 3);
4262 Bool syned
= i
->Min
.Mulr6
.syned
;
4263 Bool sz32
= i
->Min
.Mulr6
.sz32
;
4264 UInt r_srcL
= iregNo(i
->Min
.Mulr6
.srcL
, mode64
);
4265 UInt r_srcR
= iregNo(i
->Min
.Mulr6
.srcR
, mode64
);
4266 UInt r_dst
= iregNo(i
->Min
.Mulr6
.dst
, mode64
);
4267 int low
= i
->Min
.Mulr6
.low
?2:3;
4271 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, low
, 24);
4274 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, low
, 25);
4276 if (syned
) /* DMUL/DMUH r_dst,r_srcL,r_srcR */
4277 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, low
, 28);
4278 else /* DMULU/DMUHU r_dst,r_srcL,r_srcR */
4279 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, low
, 29);
4285 Bool syned
= i
->Min
.Macc
.syned
;
4286 UInt r_srcL
= iregNo(i
->Min
.Macc
.srcL
, mode64
);
4287 UInt r_srcR
= iregNo(i
->Min
.Macc
.srcR
, mode64
);
4290 switch (i
->Min
.Macc
.op
) {
4293 p
= mkFormR(p
, 28, r_srcL
, r_srcR
, 0, 0, 0);
4297 p
= mkFormR(p
, 28, r_srcL
, r_srcR
, 0, 0,
4304 switch (i
->Min
.Macc
.op
) {
4307 p
= mkFormR(p
, 28, r_srcL
, r_srcR
, 0, 0,
4312 p
= mkFormR(p
, 28, r_srcL
, r_srcR
, 0, 0,
4324 Bool syned
= i
->Min
.Div
.syned
;
4325 Bool sz32
= i
->Min
.Div
.sz32
;
4326 UInt r_srcL
= iregNo(i
->Min
.Div
.srcL
, mode64
);
4327 UInt r_srcR
= iregNo(i
->Min
.Div
.srcR
, mode64
);
4331 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 26);
4334 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 27);
4339 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 30);
4342 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, 0, 0, 31);
4347 Bool syned
= i
->Min
.Divr6
.syned
;
4348 Bool sz32
= i
->Min
.Divr6
.sz32
;
4349 UInt r_srcL
= iregNo(i
->Min
.Divr6
.srcL
, mode64
);
4350 UInt r_srcR
= iregNo(i
->Min
.Divr6
.srcR
, mode64
);
4351 UInt r_dst
= iregNo(i
->Min
.Divr6
.dst
, mode64
);
4352 int mod
= i
->Min
.Divr6
.mod
?3:2;
4356 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, mod
, 26);
4359 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, mod
, 27);
4361 if (syned
) /* DMUL/DMUH r_dst,r_srcL,r_srcR */
4362 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, mod
, 30);
4363 else /* DMULU/DMUHU r_dst,r_srcL,r_srcR */
4364 p
= mkFormR(p
, 0, r_srcL
, r_srcR
, r_dst
, mod
, 31);
4369 UInt r_src
= iregNo(i
->Min
.MtHL
.src
, mode64
);
4370 p
= mkFormR(p
, 0, r_src
, 0, 0, 0, 17);
4375 UInt r_src
= iregNo(i
->Min
.MtHL
.src
, mode64
);
4376 p
= mkFormR(p
, 0, r_src
, 0, 0, 0, 19);
4381 UInt r_dst
= iregNo(i
->Min
.MfHL
.dst
, mode64
);
4382 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 16);
4387 UInt r_dst
= iregNo(i
->Min
.MfHL
.dst
, mode64
);
4388 p
= mkFormR(p
, 0, 0, 0, r_dst
, 0, 18);
4393 UInt r_src
= iregNo(i
->Min
.MtFCSR
.src
, mode64
);
4395 p
= mkFormR(p
, 17, 6, r_src
, 31, 0, 0);
4400 UInt r_dst
= iregNo(i
->Min
.MfFCSR
.dst
, mode64
);
4402 p
= mkFormR(p
, 17, 2, r_dst
, 31, 0, 0);
4407 if (i
->Min
.Call
.cond
!= MIPScc_AL
4408 && i
->Min
.Call
.rloc
.pri
!= RLPri_None
) {
4409 /* The call might not happen (it isn't unconditional) and
4410 it returns a result. In this case we will need to
4411 generate a control flow diamond to put 0x555..555 in
4412 the return register(s) in the case where the call
4413 doesn't happen. If this ever becomes necessary, maybe
4414 copy code from the ARM equivalent. Until that day,
4418 MIPSCondCode cond
= i
->Min
.Call
.cond
;
4419 UInt r_dst
= 25; /* using %r25 as address temporary -
4420 see getRegUsage_MIPSInstr */
4422 /* jump over the following insns if condition does not hold */
4423 if (cond
!= MIPScc_AL
) {
4424 /* jmp fwds if !condition */
4425 /* don't know how many bytes to jump over yet...
4426 make space for a jump instruction + nop!!! and fill in later. */
4427 ptmp
= p
; /* fill in this bit later */
4428 p
+= 8; /* p += 8 */
4432 /* addiu $29, $29, -16 */
4433 p
= mkFormI(p
, 9, 29, 29, 0xFFF0);
4436 /* load target to r_dst; p += 4|8 */
4437 p
= mkLoadImm(p
, r_dst
, i
->Min
.Call
.target
, mode64
);
4440 p
= mkFormR(p
, 0, r_dst
, 0, 31, 0, 9); /* p += 4 */
4441 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0); /* p += 4 */
4444 /* addiu $29, $29, 16 */
4445 p
= mkFormI(p
, 9, 29, 29, 0x0010);
4448 /* Fix up the conditional jump, if there was one. */
4449 if (cond
!= MIPScc_AL
) {
4450 UInt r_src
= iregNo(i
->Min
.Call
.src
, mode64
);
4451 Int delta
= p
- ptmp
;
4453 vassert(delta
>= 20 && delta
<= 32);
4454 /* blez r_src, delta/4-1
4456 ptmp
= mkFormI(ptmp
, 6, r_src
, 0, delta
/ 4 - 1);
4457 mkFormR(ptmp
, 0, 0, 0, 0, 0, 0);
4463 /* NB: what goes on here has to be very closely coordinated
4464 with the chainXDirect_MIPS and unchainXDirect_MIPS below. */
4465 /* We're generating chain-me requests here, so we need to be
4466 sure this is actually allowed -- no-redir translations
4467 can't use chain-me's. Hence: */
4468 vassert(disp_cp_chain_me_to_slowEP
!= NULL
);
4469 vassert(disp_cp_chain_me_to_fastEP
!= NULL
);
4471 /* Use ptmp for backpatching conditional jumps. */
4474 /* First off, if this is conditional, create a conditional
4475 jump over the rest of it. Or at least, leave a space for
4476 it that we will shortly fill in. */
4477 if (i
->Min
.XDirect
.cond
!= MIPScc_AL
) {
4478 vassert(i
->Min
.XDirect
.cond
!= MIPScc_NV
);
4483 /* Update the guest PC. */
4484 /* move r9, dstGA */
4485 /* sw/sd r9, amPC */
4486 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9, (ULong
)i
->Min
.XDirect
.dstGA
,
4488 p
= do_load_or_store_machine_word(p
, False
/*!isLoad*/ , /*r*/ 9,
4489 i
->Min
.XDirect
.amPC
, mode64
);
4491 /* --- FIRST PATCHABLE BYTE follows --- */
4492 /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
4493 calling to) backs up the return address, so as to find the
4494 address of the first patchable byte. So: don't change the
4495 number of instructions (3) below. */
4496 /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
4498 const void* disp_cp_chain_me
4499 = i
->Min
.XDirect
.toFastEP
? disp_cp_chain_me_to_fastEP
4500 : disp_cp_chain_me_to_slowEP
;
4501 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9,
4502 (Addr
)disp_cp_chain_me
, mode64
);
4505 p
= mkFormR(p
, 0, 9, 0, 31, 0, 9); /* p += 4 */
4506 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0); /* p += 4 */
4507 /* --- END of PATCHABLE BYTES --- */
4509 /* Fix up the conditional jump, if there was one. */
4510 if (i
->Min
.XDirect
.cond
!= MIPScc_AL
) {
4511 Int delta
= p
- ptmp
;
4512 delta
= delta
/ 4 - 3;
4513 vassert(delta
> 0 && delta
< 40);
4515 /* lw $9, COND_OFFSET(GuestSP)
4518 ptmp
= mkFormI(ptmp
, 35, GuestSP
, 9, COND_OFFSET(mode64
));
4519 ptmp
= mkFormI(ptmp
, 4, 0, 9, (delta
));
4520 mkFormR(ptmp
, 0, 0, 0, 0, 0, 0);
4526 /* We're generating transfers that could lead indirectly to a
4527 chain-me, so we need to be sure this is actually allowed --
4528 no-redir translations are not allowed to reach normal
4529 translations without going through the scheduler. That means
4530 no XDirects or XIndirs out from no-redir translations.
4532 vassert(disp_cp_xindir
!= NULL
);
4534 /* Use ptmp for backpatching conditional jumps. */
4537 /* First off, if this is conditional, create a conditional
4538 jump over the rest of it. */
4539 if (i
->Min
.XIndir
.cond
!= MIPScc_AL
) {
4540 vassert(i
->Min
.XIndir
.cond
!= MIPScc_NV
);
4545 /* Update the guest PC. */
4546 /* sw/sd r-dstGA, amPC */
4547 p
= do_load_or_store_machine_word(p
, False
/*!isLoad*/ ,
4548 iregNo(i
->Min
.XIndir
.dstGA
, mode64
),
4549 i
->Min
.XIndir
.amPC
, mode64
);
4551 /* move r9, VG_(disp_cp_xindir) */
4554 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9,
4555 (Addr
)disp_cp_xindir
, mode64
);
4556 p
= mkFormR(p
, 0, 9, 0, 31, 0, 9); /* p += 4 */
4557 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0); /* p += 4 */
4559 /* Fix up the conditional jump, if there was one. */
4560 if (i
->Min
.XIndir
.cond
!= MIPScc_AL
) {
4561 Int delta
= p
- ptmp
;
4562 delta
= delta
/ 4 - 3;
4563 vassert(delta
> 0 && delta
< 40);
4565 /* lw $9, COND_OFFSET($GuestSP)
4568 ptmp
= mkFormI(ptmp
, 35, GuestSP
, 9, COND_OFFSET(mode64
));
4569 ptmp
= mkFormI(ptmp
, 4, 0, 9, (delta
));
4570 mkFormR(ptmp
, 0, 0, 0, 0, 0, 0);
4575 case Min_XAssisted
: {
4576 /* First off, if this is conditional, create a conditional jump
4577 over the rest of it. Or at least, leave a space for it that
4578 we will shortly fill in. */
4580 if (i
->Min
.XAssisted
.cond
!= MIPScc_AL
) {
4581 vassert(i
->Min
.XAssisted
.cond
!= MIPScc_NV
);
4586 /* Update the guest PC. */
4587 /* sw/sd r-dstGA, amPC */
4588 p
= do_load_or_store_machine_word(p
, False
/*!isLoad*/ ,
4589 iregNo(i
->Min
.XIndir
.dstGA
, mode64
),
4590 i
->Min
.XIndir
.amPC
, mode64
);
4592 /* imm32/64 r31, $magic_number */
4594 switch (i
->Min
.XAssisted
.jk
) {
4595 case Ijk_ClientReq
: trcval
= VEX_TRC_JMP_CLIENTREQ
; break;
4596 case Ijk_Sys_syscall
: trcval
= VEX_TRC_JMP_SYS_SYSCALL
; break;
4597 /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break; */
4598 case Ijk_Yield
: trcval
= VEX_TRC_JMP_YIELD
; break;
4599 case Ijk_EmWarn
: trcval
= VEX_TRC_JMP_EMWARN
; break;
4600 case Ijk_EmFail
: trcval
= VEX_TRC_JMP_EMFAIL
; break;
4601 /* case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; */
4602 case Ijk_NoDecode
: trcval
= VEX_TRC_JMP_NODECODE
; break;
4603 case Ijk_InvalICache
: trcval
= VEX_TRC_JMP_INVALICACHE
; break;
4604 case Ijk_NoRedir
: trcval
= VEX_TRC_JMP_NOREDIR
; break;
4605 case Ijk_SigILL
: trcval
= VEX_TRC_JMP_SIGILL
; break;
4606 case Ijk_SigTRAP
: trcval
= VEX_TRC_JMP_SIGTRAP
; break;
4607 /* case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; */
4608 case Ijk_SigBUS
: trcval
= VEX_TRC_JMP_SIGBUS
; break;
4609 case Ijk_SigFPE_IntDiv
: trcval
= VEX_TRC_JMP_SIGFPE_INTDIV
; break;
4610 case Ijk_SigFPE_IntOvf
: trcval
= VEX_TRC_JMP_SIGFPE_INTOVF
; break;
4611 case Ijk_Boring
: trcval
= VEX_TRC_JMP_BORING
; break;
4612 /* We don't expect to see the following being assisted.
4617 ppIRJumpKind(i
->Min
.XAssisted
.jk
);
4618 vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
4620 vassert(trcval
!= 0);
4621 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ GuestSP
, trcval
, mode64
);
4623 /* move r9, VG_(disp_cp_xassisted) */
4624 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9,
4625 (ULong
)(Addr
)disp_cp_xassisted
, mode64
);
4628 p
= mkFormR(p
, 0, 9, 0, 31, 0, 9); /* p += 4 */
4629 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0); /* p += 4 */
4631 /* Fix up the conditional jump, if there was one. */
4632 if (i
->Min
.XAssisted
.cond
!= MIPScc_AL
) {
4633 Int delta
= p
- ptmp
;
4634 delta
= delta
/ 4 - 3;
4635 vassert(delta
> 0 && delta
< 40);
4637 /* lw $9, COND_OFFSET($GuestSP)
4640 ptmp
= mkFormI(ptmp
, 35, GuestSP
, 9, COND_OFFSET(mode64
));
4641 ptmp
= mkFormI(ptmp
, 4, 0, 9, (delta
));
4642 mkFormR(ptmp
, 0, 0, 0, 0, 0, 0);
4648 MIPSAMode
*am_addr
= i
->Min
.Load
.src
;
4649 if (am_addr
->tag
== Mam_IR
) {
4650 UInt r_dst
= iregNo(i
->Min
.Load
.dst
, mode64
);
4651 UInt opc
, sz
= i
->Min
.Load
.sz
;
4652 if (mode64
&& (sz
== 4 || sz
== 8)) {
4653 /* should be guaranteed to us by iselWordExpr_AMode */
4654 vassert(0 == (am_addr
->Mam
.IR
.index
& 3));
4674 p
= doAMode_IR(p
, opc
, r_dst
, am_addr
, mode64
);
4676 } else if (am_addr
->tag
== Mam_RR
) {
4677 UInt r_dst
= iregNo(i
->Min
.Load
.dst
, mode64
);
4678 UInt opc
, sz
= i
->Min
.Load
.sz
;
4698 p
= doAMode_RR(p
, opc
, r_dst
, am_addr
, mode64
);
4705 MIPSAMode
*am_addr
= i
->Min
.Store
.dst
;
4706 if (am_addr
->tag
== Mam_IR
) {
4707 UInt r_src
= iregNo(i
->Min
.Store
.src
, mode64
);
4708 UInt opc
, sz
= i
->Min
.Store
.sz
;
4709 if (mode64
&& (sz
== 4 || sz
== 8)) {
4710 /* should be guaranteed to us by iselWordExpr_AMode */
4711 vassert(0 == (am_addr
->Mam
.IR
.index
& 3));
4731 p
= doAMode_IR(p
, opc
, r_src
, am_addr
, mode64
);
4733 } else if (am_addr
->tag
== Mam_RR
) {
4734 UInt r_src
= iregNo(i
->Min
.Store
.src
, mode64
);
4735 UInt opc
, sz
= i
->Min
.Store
.sz
;
4755 p
= doAMode_RR(p
, opc
, r_src
, am_addr
, mode64
);
4761 MIPSAMode
*am_addr
= i
->Min
.LoadL
.src
;
4762 UInt r_src
= iregNo(am_addr
->Mam
.IR
.base
, mode64
);
4763 UInt idx
= am_addr
->Mam
.IR
.index
;
4764 UInt r_dst
= iregNo(i
->Min
.LoadL
.dst
, mode64
);
4765 #if (__mips_isa_rev >= 6)
4766 if (i
->Min
.LoadL
.sz
== 4)
4767 p
= mkFormI(p
, 0x1F, r_src
, r_dst
, ((idx
<< 7) & 0xff80) | 0x36);
4769 p
= mkFormI(p
, 0x1F, r_src
, r_dst
, ((idx
<< 7) & 0xff80) | 0x37);
4771 if (i
->Min
.LoadL
.sz
== 4)
4772 p
= mkFormI(p
, 0x30, r_src
, r_dst
, idx
);
4774 p
= mkFormI(p
, 0x34, r_src
, r_dst
, idx
);
4779 MIPSAMode
*am_addr
= i
->Min
.StoreC
.dst
;
4780 UInt r_src
= iregNo(i
->Min
.StoreC
.src
, mode64
);
4781 UInt idx
= am_addr
->Mam
.IR
.index
;
4782 UInt r_dst
= iregNo(am_addr
->Mam
.IR
.base
, mode64
);
4783 #if (__mips_isa_rev >= 6)
4784 if (i
->Min
.LoadL
.sz
== 4)
4785 p
= mkFormI(p
, 0x1F, r_src
, r_dst
, ((idx
<< 7) & 0xff80) | 0x26);
4787 p
= mkFormI(p
, 0x1F, r_src
, r_dst
, ((idx
<< 7) & 0xff80) | 0x27);
4789 if (i
->Min
.StoreC
.sz
== 4)
4790 p
= mkFormI(p
, 0x38, r_dst
, r_src
, idx
);
4792 p
= mkFormI(p
, 0x3C, r_dst
, r_src
, idx
);
4797 if (i
->Min
.Cas
.sz
!= 8 && i
->Min
.Cas
.sz
!= 4)
4799 UInt old
= iregNo(i
->Min
.Cas
.old
, mode64
);
4800 UInt addr
= iregNo(i
->Min
.Cas
.addr
, mode64
);
4801 UInt expd
= iregNo(i
->Min
.Cas
.expd
, mode64
);
4802 UInt data
= iregNo(i
->Min
.Cas
.data
, mode64
);
4803 Bool sz8
= toBool(i
->Min
.Cas
.sz
== 8);
4806 * ll(d) old, 0(addr)
4807 * bne old, expd, end
4809 * (d)addiu old, old, 1
4810 * sc(d) data, 0(addr)
4811 * movn old, expd, data
4814 #if (__mips_isa_rev >= 6)
4815 // ll(d) old, 0(addr)
4816 p
= mkFormI(p
, 0x1F, addr
, old
, sz8
? 0x37: 0x36);
4817 // bne old, expd, end
4818 p
= mkFormI(p
, 5, old
, expd
, 5);
4820 // ll(d) old, 0(addr)
4821 p
= mkFormI(p
, sz8
? 0x34 : 0x30, addr
, old
, 0);
4822 // bne old, expd, end
4823 p
= mkFormI(p
, 5, old
, expd
, 4);
4826 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0);
4827 // (d)addiu old, old, 1
4828 p
= mkFormI(p
, sz8
? 25 : 9, old
, old
, 4);
4830 #if (__mips_isa_rev >= 6)
4831 // sc(d) data, 0(addr)
4832 p
= mkFormI(p
, 0x1F, addr
, data
, sz8
? 0x27: 0x26);
4834 p
= mkFormI(p
, 0x36, data
, 0, 1);
4836 p
= mkFormR(p
, 0, 0, expd
, old
, 0, 0x25 );
4838 // sc(d) data, 0(addr)
4839 p
= mkFormI(p
, sz8
? 0x3C : 0x38, addr
, data
, 0);
4840 // movn old, expd, data
4841 p
= mkFormR(p
, 0, expd
, data
, old
, 0, 0xb);
4847 UInt reg
= iregNo(i
->Min
.RdWrLR
.gpr
, mode64
);
4848 Bool wrLR
= i
->Min
.RdWrLR
.wrLR
;
4850 p
= mkMoveReg(p
, 31, reg
);
4852 p
= mkMoveReg(p
, reg
, 31);
4856 /* Floating point */
4858 MIPSAMode
*am_addr
= i
->Min
.FpLdSt
.addr
;
4859 UChar sz
= i
->Min
.FpLdSt
.sz
;
4860 vassert(sz
== 4 || sz
== 8);
4862 UInt f_reg
= fregNo(i
->Min
.FpLdSt
.reg
, mode64
);
4863 if (i
->Min
.FpLdSt
.isLoad
) {
4864 if (am_addr
->tag
== Mam_IR
)
4865 p
= doAMode_IR(p
, 0x31, f_reg
, am_addr
, mode64
);
4866 else if (am_addr
->tag
== Mam_RR
)
4867 p
= doAMode_RR(p
, 0x31, f_reg
, am_addr
, mode64
);
4869 if (am_addr
->tag
== Mam_IR
)
4870 p
= doAMode_IR(p
, 0x39, f_reg
, am_addr
, mode64
);
4871 else if (am_addr
->tag
== Mam_RR
)
4872 p
= doAMode_RR(p
, 0x39, f_reg
, am_addr
, mode64
);
4874 } else if (sz
== 8) {
4875 UInt f_reg
= dregNo(i
->Min
.FpLdSt
.reg
);
4876 if (i
->Min
.FpLdSt
.isLoad
) {
4877 if (am_addr
->tag
== Mam_IR
) {
4878 p
= doAMode_IR(p
, 0x35, f_reg
, am_addr
, mode64
);
4879 } else if (am_addr
->tag
== Mam_RR
) {
4880 p
= doAMode_RR(p
, 0x35, f_reg
, am_addr
, mode64
);
4883 if (am_addr
->tag
== Mam_IR
) {
4884 p
= doAMode_IR(p
, 0x3d, f_reg
, am_addr
, mode64
);
4885 } else if (am_addr
->tag
== Mam_RR
) {
4886 p
= doAMode_RR(p
, 0x3d, f_reg
, am_addr
, mode64
);
4894 switch (i
->Min
.FpUnary
.op
) {
4895 case Mfp_MOVS
: { /* FP move */
4896 UInt fr_dst
= fregNo(i
->Min
.FpUnary
.dst
, mode64
);
4897 UInt fr_src
= fregNo(i
->Min
.FpUnary
.src
, mode64
);
4898 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x6);
4901 case Mfp_MOVD
: { /* FP move */
4902 UInt fr_dst
= dregNo(i
->Min
.FpUnary
.dst
);
4903 UInt fr_src
= dregNo(i
->Min
.FpUnary
.src
);
4904 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x6);
4907 case Mfp_ABSS
: { /* ABS.S */
4908 UInt fr_dst
= fregNo(i
->Min
.FpUnary
.dst
, mode64
);
4909 UInt fr_src
= fregNo(i
->Min
.FpUnary
.src
, mode64
);
4910 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x5);
4913 case Mfp_ABSD
: { /* ABS.D */
4914 UInt fr_dst
= dregNo(i
->Min
.FpUnary
.dst
);
4915 UInt fr_src
= dregNo(i
->Min
.FpUnary
.src
);
4916 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x5);
4919 case Mfp_NEGS
: { /* NEG.S */
4920 UInt fr_dst
= fregNo(i
->Min
.FpUnary
.dst
, mode64
);
4921 UInt fr_src
= fregNo(i
->Min
.FpUnary
.src
, mode64
);
4922 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x7);
4925 case Mfp_NEGD
: { /* NEG.D */
4926 UInt fr_dst
= dregNo(i
->Min
.FpUnary
.dst
);
4927 UInt fr_src
= dregNo(i
->Min
.FpUnary
.src
);
4928 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x7);
4931 case Mfp_SQRTS
: { /* SQRT.S */
4932 UInt fr_dst
= fregNo(i
->Min
.FpUnary
.dst
, mode64
);
4933 UInt fr_src
= fregNo(i
->Min
.FpUnary
.src
, mode64
);
4934 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x04);
4937 case Mfp_SQRTD
: { /* SQRT.D */
4938 UInt fr_dst
= dregNo(i
->Min
.FpUnary
.dst
);
4939 UInt fr_src
= dregNo(i
->Min
.FpUnary
.src
);
4940 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x04);
4949 case Min_FpBinary
: {
4950 switch (i
->Min
.FpBinary
.op
) {
4952 UInt fr_dst
= fregNo(i
->Min
.FpBinary
.dst
, mode64
);
4953 UInt fr_srcL
= fregNo(i
->Min
.FpBinary
.srcL
, mode64
);
4954 UInt fr_srcR
= fregNo(i
->Min
.FpBinary
.srcR
, mode64
);
4955 p
= mkFormR(p
, 0x11, 0x10, fr_srcR
, fr_srcL
, fr_dst
, 0);
4959 UInt fr_dst
= fregNo(i
->Min
.FpBinary
.dst
, mode64
);
4960 UInt fr_srcL
= fregNo(i
->Min
.FpBinary
.srcL
, mode64
);
4961 UInt fr_srcR
= fregNo(i
->Min
.FpBinary
.srcR
, mode64
);
4962 p
= mkFormR(p
, 0x11, 0x10, fr_srcR
, fr_srcL
, fr_dst
, 1);
4966 UInt fr_dst
= fregNo(i
->Min
.FpBinary
.dst
, mode64
);
4967 UInt fr_srcL
= fregNo(i
->Min
.FpBinary
.srcL
, mode64
);
4968 UInt fr_srcR
= fregNo(i
->Min
.FpBinary
.srcR
, mode64
);
4969 p
= mkFormR(p
, 0x11, 0x10, fr_srcR
, fr_srcL
, fr_dst
, 2);
4973 UInt fr_dst
= fregNo(i
->Min
.FpBinary
.dst
, mode64
);
4974 UInt fr_srcL
= fregNo(i
->Min
.FpBinary
.srcL
, mode64
);
4975 UInt fr_srcR
= fregNo(i
->Min
.FpBinary
.srcR
, mode64
);
4976 p
= mkFormR(p
, 0x11, 0x10, fr_srcR
, fr_srcL
, fr_dst
, 3);
4980 UInt fr_dst
= dregNo(i
->Min
.FpBinary
.dst
);
4981 UInt fr_srcL
= dregNo(i
->Min
.FpBinary
.srcL
);
4982 UInt fr_srcR
= dregNo(i
->Min
.FpBinary
.srcR
);
4983 p
= mkFormR(p
, 0x11, 0x11, fr_srcR
, fr_srcL
, fr_dst
, 0);
4987 UInt fr_dst
= dregNo(i
->Min
.FpBinary
.dst
);
4988 UInt fr_srcL
= dregNo(i
->Min
.FpBinary
.srcL
);
4989 UInt fr_srcR
= dregNo(i
->Min
.FpBinary
.srcR
);
4990 p
= mkFormR(p
, 0x11, 0x11, fr_srcR
, fr_srcL
, fr_dst
, 1);
4994 UInt fr_dst
= dregNo(i
->Min
.FpBinary
.dst
);
4995 UInt fr_srcL
= dregNo(i
->Min
.FpBinary
.srcL
);
4996 UInt fr_srcR
= dregNo(i
->Min
.FpBinary
.srcR
);
4997 p
= mkFormR(p
, 0x11, 0x11, fr_srcR
, fr_srcL
, fr_dst
, 2);
5001 UInt fr_dst
= dregNo(i
->Min
.FpBinary
.dst
);
5002 UInt fr_srcL
= dregNo(i
->Min
.FpBinary
.srcL
);
5003 UInt fr_srcR
= dregNo(i
->Min
.FpBinary
.srcR
);
5004 p
= mkFormR(p
, 0x11, 0x11, fr_srcR
, fr_srcL
, fr_dst
, 3);
5013 case Min_FpTernary
: {
5014 switch (i
->Min
.FpTernary
.op
) {
5016 UInt fr_dst
= fregNo(i
->Min
.FpTernary
.dst
, mode64
);
5017 UInt fr_src1
= fregNo(i
->Min
.FpTernary
.src1
, mode64
);
5018 UInt fr_src2
= fregNo(i
->Min
.FpTernary
.src2
, mode64
);
5019 UInt fr_src3
= fregNo(i
->Min
.FpTernary
.src3
, mode64
);
5020 #if (__mips_isa_rev >= 6)
5021 p
= mkFormR(p
, 0x11, 0x10 , 0x0, fr_src1
, fr_dst
, 0x6);
5022 p
= mkFormR(p
, 0x11, 0x10, fr_src3
, fr_src2
, fr_dst
, 0x18);
5024 p
= mkFormR(p
, 0x13, fr_src1
, fr_src2
, fr_src3
, fr_dst
, 0x20);
5029 UInt fr_dst
= dregNo(i
->Min
.FpTernary
.dst
);
5030 UInt fr_src1
= dregNo(i
->Min
.FpTernary
.src1
);
5031 UInt fr_src2
= dregNo(i
->Min
.FpTernary
.src2
);
5032 UInt fr_src3
= dregNo(i
->Min
.FpTernary
.src3
);
5033 #if (__mips_isa_rev >= 6)
5034 p
= mkFormR(p
, 0x11, 0x11 , 0x0, fr_src1
, fr_dst
, 0x6);
5035 p
= mkFormR(p
, 0x11, 0x11, fr_src3
, fr_src2
, fr_dst
, 0x18);
5037 p
= mkFormR(p
, 0x13, fr_src1
, fr_src2
, fr_src3
, fr_dst
, 0x21);
5042 UInt fr_dst
= fregNo(i
->Min
.FpTernary
.dst
, mode64
);
5043 UInt fr_src1
= fregNo(i
->Min
.FpTernary
.src1
, mode64
);
5044 UInt fr_src2
= fregNo(i
->Min
.FpTernary
.src2
, mode64
);
5045 UInt fr_src3
= fregNo(i
->Min
.FpTernary
.src3
, mode64
);
5046 #if (__mips_isa_rev >= 6)
5047 p
= mkFormR(p
, 0x11, 0x10 , 0x0, fr_src1
, fr_dst
, 0x6);
5048 p
= mkFormR(p
, 0x11, 0x10, fr_src3
, fr_src2
, fr_dst
, 0x19);
5050 p
= mkFormR(p
, 0x13, fr_src1
, fr_src2
, fr_src3
, fr_dst
, 0x28);
5055 UInt fr_dst
= dregNo(i
->Min
.FpTernary
.dst
);
5056 UInt fr_src1
= dregNo(i
->Min
.FpTernary
.src1
);
5057 UInt fr_src2
= dregNo(i
->Min
.FpTernary
.src2
);
5058 UInt fr_src3
= dregNo(i
->Min
.FpTernary
.src3
);
5059 #if (__mips_isa_rev >= 6)
5060 p
= mkFormR(p
, 0x11, 0x11 , 0x0, fr_src1
, fr_dst
, 0x6);
5061 p
= mkFormR(p
, 0x11, 0x11, fr_src3
, fr_src2
, fr_dst
, 0x19);
5063 p
= mkFormR(p
, 0x13, fr_src1
, fr_src2
, fr_src3
, fr_dst
, 0x29);
5073 case Min_FpConvert
: {
5074 switch (i
->Min
.FpConvert
.op
) {
5075 UInt fr_dst
, fr_src
;
5077 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5078 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5079 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x20);
5082 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5083 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5084 p
= mkFormR(p
, 0x11, 0x14, 0, fr_src
, fr_dst
, 0x20);
5087 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5088 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5089 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x24);
5092 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5093 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5094 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x24);
5097 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5098 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5099 p
= mkFormR(p
, 0x11, 0x14, 0, fr_src
, fr_dst
, 0x21);
5102 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5103 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5104 p
= mkFormR(p
, 0x11, 0x15, 0, fr_src
, fr_dst
, 0x21);
5107 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5108 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5109 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x21);
5112 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5113 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5114 p
= mkFormR(p
, 0x11, 0x15, 0, fr_src
, fr_dst
, 0x20);
5118 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5119 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5121 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5122 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5124 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x25);
5127 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5128 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5129 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x25);
5132 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5133 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5134 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x0D);
5137 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5138 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5139 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0D);
5142 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5143 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5144 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x09);
5147 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5148 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5149 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x09);
5152 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5153 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5154 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x0E);
5157 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5158 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5159 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0E);
5162 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5163 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5164 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x0A);
5167 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5168 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5169 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0A);
5172 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5173 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5174 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x0C);
5177 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5178 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5179 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0C);
5182 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5183 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5184 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x08);
5187 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5188 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5189 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x0F);
5192 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5193 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5194 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0F);
5197 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5198 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5199 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x0B);
5202 fr_dst
= fregNo(i
->Min
.FpConvert
.dst
, mode64
);
5203 fr_src
= fregNo(i
->Min
.FpConvert
.src
, mode64
);
5204 p
= mkFormR(p
, 0x11, 0x10, 0, fr_src
, fr_dst
, 0x1A);
5207 fr_dst
= dregNo(i
->Min
.FpConvert
.dst
);
5208 fr_src
= dregNo(i
->Min
.FpConvert
.src
);
5209 p
= mkFormR(p
, 0x11, 0x11, 0, fr_src
, fr_dst
, 0x1A);
5218 case Min_FpCompare
: {
5219 #if (__mips_isa_rev >= 6)
5226 switch (i
->Min
.FpConvert
.op
) {
5228 fr_dst
= dregNo(i
->Min
.FpCompare
.dst
);
5229 fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5230 fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5235 fr_dst
= dregNo(i
->Min
.FpCompare
.dst
);
5236 fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5237 fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5242 fr_dst
= dregNo(i
->Min
.FpCompare
.dst
);
5243 fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5244 fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5249 fr_dst
= dregNo(i
->Min
.FpCompare
.dst
);
5250 fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5251 fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5256 fr_dst
= fregNo(i
->Min
.FpCompare
.dst
, mode64
);
5257 fr_srcL
= fregNo(i
->Min
.FpCompare
.srcL
, mode64
);
5258 fr_srcR
= fregNo(i
->Min
.FpCompare
.srcR
, mode64
);
5263 fr_dst
= fregNo(i
->Min
.FpCompare
.dst
, mode64
);
5264 fr_srcL
= fregNo(i
->Min
.FpCompare
.srcL
, mode64
);
5265 fr_srcR
= fregNo(i
->Min
.FpCompare
.srcR
, mode64
);
5270 fr_dst
= fregNo(i
->Min
.FpCompare
.dst
, mode64
);
5271 fr_srcL
= fregNo(i
->Min
.FpCompare
.srcL
, mode64
);
5272 fr_srcR
= fregNo(i
->Min
.FpCompare
.srcR
, mode64
);
5277 fr_dst
= fregNo(i
->Min
.FpCompare
.dst
, mode64
);
5278 fr_srcL
= fregNo(i
->Min
.FpCompare
.srcL
, mode64
);
5279 fr_srcR
= fregNo(i
->Min
.FpCompare
.srcR
, mode64
);
5286 /* cmp.cond.d fr_srcL, fr_srcR */
5287 p
= mkFormR(p
, 0x11, format
, fr_srcR
, fr_srcL
, fr_dst
, op
);
5289 UInt r_dst
= iregNo(i
->Min
.FpCompare
.dst
, mode64
);
5290 UInt fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5291 UInt fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5294 switch (i
->Min
.FpConvert
.op
) {
5310 /* c.cond.d fr_srcL, fr_srcR
5312 srl r_dst, r_dst, 23
5313 andi r_dst, r_dst, 1 */
5314 p
= mkFormR(p
, 0x11, 0x11, fr_srcL
, fr_srcR
, 0, op
+ 48);
5315 p
= mkFormR(p
, 0x11, 0x2, r_dst
, 31, 0, 0);
5316 p
= mkFormS(p
, 0, r_dst
, 0, r_dst
, 23, 2);
5317 p
= mkFormI(p
, 12, r_dst
, r_dst
, 1);
5322 #if (__mips_isa_rev >= 6)
5323 case Min_FpMinMax
: {
5324 UInt r_dst
= dregNo(i
->Min
.FpCompare
.dst
);
5325 UInt fr_srcL
= dregNo(i
->Min
.FpCompare
.srcL
);
5326 UInt fr_srcR
= dregNo(i
->Min
.FpCompare
.srcR
);
5329 switch (i
->Min
.FpMinMax
.op
) {
5349 p
= mkFormR(p
, 0x11, format
, fr_srcR
, fr_srcL
, r_dst
, instr
);
5355 case Min_FpGpMove
: {
5356 switch (i
->Min
.FpGpMove
.op
) {
5358 case MFpGpMove_mfc1
: {
5359 rt
= iregNo(i
->Min
.FpGpMove
.dst
, mode64
);
5360 fs
= fregNo(i
->Min
.FpGpMove
.src
, mode64
);
5361 p
= mkFormR(p
, 0x11, 0x0, rt
, fs
, 0x0, 0x0);
5364 case MFpGpMove_dmfc1
: {
5366 rt
= iregNo(i
->Min
.FpGpMove
.dst
, mode64
);
5367 fs
= fregNo(i
->Min
.FpGpMove
.src
, mode64
);
5368 p
= mkFormR(p
, 0x11, 0x1, rt
, fs
, 0x0, 0x0);
5371 case MFpGpMove_mtc1
: {
5372 rt
= iregNo(i
->Min
.FpGpMove
.src
, mode64
);
5373 fs
= fregNo(i
->Min
.FpGpMove
.dst
, mode64
);
5374 p
= mkFormR(p
, 0x11, 0x4, rt
, fs
, 0x0, 0x0);
5377 case MFpGpMove_dmtc1
: {
5379 rt
= iregNo(i
->Min
.FpGpMove
.src
, mode64
);
5380 fs
= fregNo(i
->Min
.FpGpMove
.dst
, mode64
);
5381 p
= mkFormR(p
, 0x11, 0x5, rt
, fs
, 0x0, 0x0);
5390 case Min_MoveCond
: {
5391 switch (i
->Min
.MoveCond
.op
) {
5393 case MFpMoveCond_movns
: {
5394 d
= fregNo(i
->Min
.MoveCond
.dst
, mode64
);
5395 s
= fregNo(i
->Min
.MoveCond
.src
, mode64
);
5396 t
= iregNo(i
->Min
.MoveCond
.cond
, mode64
);
5397 p
= mkFormR(p
, 0x11, 0x10, t
, s
, d
, 0x13);
5400 case MFpMoveCond_movnd
: {
5401 d
= dregNo(i
->Min
.MoveCond
.dst
);
5402 s
= dregNo(i
->Min
.MoveCond
.src
);
5403 t
= iregNo(i
->Min
.MoveCond
.cond
, mode64
);
5404 p
= mkFormR(p
, 0x11, 0x11, t
, s
, d
, 0x13);
5407 case MMoveCond_movn
: {
5408 d
= iregNo(i
->Min
.MoveCond
.dst
, mode64
);
5409 s
= iregNo(i
->Min
.MoveCond
.src
, mode64
);
5410 t
= iregNo(i
->Min
.MoveCond
.cond
, mode64
);
5411 p
= mkFormR(p
, 0, s
, t
, d
, 0, 0xb);
5416 d
= iregNo(i
->Min
.MoveCond
.dst
, mode64
);
5417 s
= iregNo(i
->Min
.MoveCond
.src
, mode64
);
5418 t
= iregNo(i
->Min
.MoveCond
.cond
, mode64
);
5419 p
= mkFormR(p
, 0, s
, t
, d
, 0, 0x35);
5423 d
= iregNo(i
->Min
.MoveCond
.dst
, mode64
);
5424 s
= iregNo(i
->Min
.MoveCond
.src
, mode64
);
5425 t
= iregNo(i
->Min
.MoveCond
.cond
, mode64
);
5426 p
= mkFormR(p
, 0, s
, t
, d
, 0, 0x37);
5430 d
= fregNo(i
->Min
.MoveCond
.dst
, mode64
);
5431 s
= fregNo(i
->Min
.MoveCond
.src
, mode64
);
5432 t
= fregNo(i
->Min
.MoveCond
.cond
, mode64
);
5433 p
= mkFormR(p
, 0x11, 0x10, t
, s
, d
, 0x10);
5437 d
= fregNo(i
->Min
.MoveCond
.dst
, mode64
);
5438 s
= fregNo(i
->Min
.MoveCond
.src
, mode64
);
5439 t
= fregNo(i
->Min
.MoveCond
.cond
, mode64
);
5440 p
= mkFormR(p
, 0x11, 0x11, t
, s
, d
, 0x10);
5450 /* This requires a 32-bit dec/test in 32 mode. */
5462 /* lw r9, amCounter */
5463 p
= do_load_or_store_word32(p
, True
/*isLoad*/ , /*r*/ 9,
5464 i
->Min
.EvCheck
.amCounter
, mode64
);
5465 /* addiu r9,r9,-1 */
5466 p
= mkFormI(p
, 9, 9, 9, 0xFFFF);
5467 /* sw r30, amCounter */
5468 p
= do_load_or_store_word32(p
, False
/*!isLoad*/ , /*r*/ 9,
5469 i
->Min
.EvCheck
.amCounter
, mode64
);
5470 /* bgez t9, nofail */
5471 p
= mkFormI(p
, 1, 9, 1, 3);
5472 /* lw/ld r9, amFailAddr */
5473 p
= do_load_or_store_machine_word(p
, True
/*isLoad*/ , /*r*/ 9,
5474 i
->Min
.EvCheck
.amFailAddr
, mode64
);
5476 p
= mkFormR(p
, 0, 9, 0, 31, 0, 9); /* p += 4 */
5477 p
= mkFormR(p
, 0, 0, 0, 0, 0, 0); /* p += 4 */
5481 vassert(evCheckSzB_MIPS() == (UChar
*)p
- (UChar
*)p0
);
5486 /* Generate a code template to increment a memory location whose
5487 address will be known later as an immediate value. This code
5488 template will be patched once the memory location is known.
5489 For now we do this with address == 0x65556555. */
5492 move r9, 0x6555655565556555ULL
5497 /* move r9, 0x6555655565556555ULL */
5498 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9, 0x6555655565556555ULL
,
5501 p
= mkFormI(p
, 55, 9, 8, 0);
5503 /* daddiu r8, r8, 1 */
5504 p
= mkFormI(p
, 25, 8, 8, 1);
5507 p
= mkFormI(p
, 63, 9, 8, 0);
5512 addiu r8, r8, 1 # add least significant word
5514 sltiu r1, r8, 1 # set carry-in bit
5519 /* move r9, 0x65556555 */
5520 p
= mkLoadImm_EXACTLY2or6(p
, /*r*/ 9, 0x65556555ULL
,
5523 p
= mkFormI(p
, 35, 9, 8, 0);
5525 /* addiu r8, r8, 1 # add least significant word */
5526 p
= mkFormI(p
, 9, 8, 8, 1);
5529 p
= mkFormI(p
, 43, 9, 8, 0);
5531 /* sltiu r1, r8, 1 # set carry-in bit */
5532 p
= mkFormI(p
, 11, 8, 1, 1);
5535 p
= mkFormI(p
, 35, 9, 8, 4);
5537 /* addu r8, r8, r1 */
5538 p
= mkFormR(p
, 0, 8, 1, 8, 0, 33);
5541 p
= mkFormI(p
, 43, 9, 8, 4);
5544 /* Tell the caller .. */
5545 vassert(!(*is_profInc
));
5556 vex_printf("\n=> ");
5557 ppMIPSInstr(i
, mode64
);
5558 vpanic("emit_MIPSInstr");
5559 /* NOTREACHED */ done
:
5560 vassert(p
- &buf
[0] <= 128);
5564 /* How big is an event check? See case for Min_EvCheck in
5565 emit_MIPSInstr just above. That crosschecks what this returns, so
5566 we can tell if we're inconsistent. */
5567 Int
evCheckSzB_MIPS (void)
5569 UInt kInstrSize
= 4;
5570 return 7*kInstrSize
;
/* NB: what goes on here has to be very closely coordinated with the
   emitInstr case for XDirect, above. */
/* Redirect an XDirect jump site so it branches straight to
   place_to_jump_to instead of through the chain-me stub.  Returns the
   host-address range whose icache must be invalidated.
   NOTE(review): recovered from a line-mangled extraction; dropped
   structural lines (the mode64 parameter, braces, return) were
   reconstructed -- verify against upstream Valgrind. */
VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
                                  void* place_to_chain,
                                  const void* disp_cp_chain_me_EXPECTED,
                                  const void* place_to_jump_to,
                                  Bool  mode64 )
{
   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
   /* What we're expecting to see is:
        move r9, disp_cp_chain_me_to_EXPECTED
        jalr r9
        nop
      viz
        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
        0x120F809   # jalr r9
        0x00000000  # nop
   */
   UChar* p = (UChar*)place_to_chain;
   /* Patch point must be 4-byte aligned, as all MIPS instructions are. */
   vassert(0 == (3 & (HWord)p));
   vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
                                 (UInt)(Addr)disp_cp_chain_me_EXPECTED,
                                 mode64));
   /* 0x120F809 == jalr r9; the following word is the delay-slot nop. */
   vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
   vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
   /* And what we want to change it to is either:
        move r9, place_to_jump_to
        jalr r9
        nop
      viz
        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
        0x120F809   # jalr r9
        0x00000000  # nop

      The replacement has the same length as the original.
   */
   p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
                             (Addr)place_to_jump_to, mode64);
   p = emit32(p, 0x120F809);
   p = emit32(p, 0x00000000);

   Int len = p - (UChar*)place_to_chain;
   vassert(len == (mode64 ? 32 : 16));  /* stay sane */
   VexInvalRange vir = {(HWord)place_to_chain, len};
   return vir;
}
/* NB: what goes on here has to be very closely coordinated with the
   emitInstr case for XDirect, above. */
/* Inverse of chainXDirect_MIPS: rewrite a previously-chained XDirect
   jump so it goes back through the disp_cp_chain_me stub.  Returns the
   host-address range whose icache must be invalidated.
   NOTE(review): recovered from a line-mangled extraction; dropped
   structural lines (the mode64 parameter, braces, return) were
   reconstructed -- verify against upstream Valgrind. */
VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
                                    void* place_to_unchain,
                                    const void* place_to_jump_to_EXPECTED,
                                    const void* disp_cp_chain_me,
                                    Bool  mode64 )
{
   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
   /* What we're expecting to see is:
        move r9, place_to_jump_to_EXPECTED
        jalr r9
        nop
      viz
        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
        0x120F809   # jalr r9
        0x00000000  # nop
   */
   UChar* p = (UChar*)place_to_unchain;
   /* Patch point must be 4-byte aligned. */
   vassert(0 == (3 & (HWord)p));
   vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
                                 (Addr)place_to_jump_to_EXPECTED,
                                 mode64));
   /* 0x120F809 == jalr r9; the following word is the delay-slot nop. */
   vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
   vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
   /* And what we want to change it to is:
        move r9, disp_cp_chain_me
        jalr r9
        nop
      viz
        <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
        0x120F809   # jalr r9
        0x00000000  # nop

      The replacement has the same length as the original.
   */
   p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
                             (Addr)disp_cp_chain_me, mode64);
   p = emit32(p, 0x120F809);
   p = emit32(p, 0x00000000);

   Int len = p - (UChar*)place_to_unchain;
   vassert(len == (mode64 ? 32 : 16));  /* stay sane */
   VexInvalRange vir = {(HWord)place_to_unchain, len};
   return vir;
}
/* Patch the counter address into a profile inc point, as previously
   created by the Min_ProfInc case for emit_MIPSInstr. */
/* The ProfInc template was emitted with the placeholder address
   0x65556555(65556555); once the real counter location is known, this
   overwrites the placeholder load-immediate with it.  Returns the
   host-address range whose icache must be invalidated.
   NOTE(review): recovered from a line-mangled extraction; dropped
   structural lines (the mode64 parameter, if/else braces, return) were
   reconstructed -- verify against upstream Valgrind. */
VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
                                  void*  place_to_patch,
                                  const ULong* location_of_counter,
                                  Bool mode64 )
{
   vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
   if (mode64)
      vassert(sizeof(ULong*) == 8);
   else
      vassert(sizeof(ULong*) == 4);
   UChar* p = (UChar*)place_to_patch;
   /* Patch point must be 4-byte aligned. */
   vassert(0 == (3 & (HWord)p));
   /* Check the placeholder constant really is loaded into r9 here. */
   vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
                                 mode64 ? 0x6555655565556555ULL : 0x65556555,
                                 mode64));
   if (mode64) {
      /* 64-bit increment: ld r8,0(r9); daddiu r8,r8,1; sd r8,0(r9) */
      vassert(fetch32(p + 24 + 0) == 0xDD280000);
      vassert(fetch32(p + 24 + 4) == 0x65080001);
      vassert(fetch32(p + 24 + 8) == 0xFD280000);
   } else {
      /* 32-bit mode: the 64-bit counter is bumped as two 32-bit words
         with an explicit carry:
           lw/addiu/sw low word, sltiu sets carry, lw/addu/sw high word */
      vassert(fetch32(p + 8 + 0) == 0x8D280000);
      vassert(fetch32(p + 8 + 4) == 0x25080001);
      vassert(fetch32(p + 8 + 8) == 0xAD280000);
      vassert(fetch32(p + 8 + 12) == 0x2d010001);
      vassert(fetch32(p + 8 + 16) == 0x8d280004);
      vassert(fetch32(p + 8 + 20) == 0x01014021);
      vassert(fetch32(p + 8 + 24) == 0xad280004);
   }
   /* Splice the real counter address in place of the placeholder. */
   p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
                             (Addr)location_of_counter, mode64);
   VexInvalRange vir = {(HWord)p, 8};
   return vir;
}
5708 /*---------------------------------------------------------------*/
5709 /*--- end host_mips_defs.c ---*/
5710 /*---------------------------------------------------------------*/