2 /*---------------------------------------------------------------*/
3 /*--- begin host_mips_defs.c ---*/
4 /*---------------------------------------------------------------*/
6 /*
7 This file is part of Valgrind, a dynamic binary instrumentation
8 framework.
10 Copyright (C) 2010-2017 RT-RK
12 This program is free software; you can redistribute it and/or
13 modify it under the terms of the GNU General Public License as
14 published by the Free Software Foundation; either version 2 of the
15 License, or (at your option) any later version.
17 This program is distributed in the hope that it will be useful, but
18 WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
20 General Public License for more details.
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, see <http://www.gnu.org/licenses/>.
25 The GNU General Public License is contained in the file COPYING.
26 */
28 #include "libvex_basictypes.h"
29 #include "libvex.h"
30 #include "libvex_trc_values.h"
32 #include "main_util.h"
33 #include "host_generic_regs.h"
34 #include "host_mips_defs.h"
36 /* Register number for guest state pointer in host code. */
37 #define GuestSP 23
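/* GPR23 ($s7) holds the guest state pointer in generated code, which is
   why it appears only in the non-allocatable part of the register
   universe built below. */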
40 /*---------------- Registers ----------------*/
42 const RRegUniverse* getRRegUniverse_MIPS ( Bool mode64 )
44 /* The real-register universe is a big constant, so we just want to
45 initialise it once. rRegUniverse_MIPS_initted values: 0=not initted,
46 1=initted for 32-bit-mode, 2=initted for 64-bit-mode */
47 static RRegUniverse rRegUniverse_MIPS;
48 static UInt rRegUniverse_MIPS_initted = 0;
50 /* Handy shorthand, nothing more */
51 RRegUniverse* ru = &rRegUniverse_MIPS;
53 /* This isn't thread-safe. Sigh. */
54 UInt howNeeded = mode64 ? 2 : 1;
55 if (LIKELY(rRegUniverse_MIPS_initted == howNeeded))
56 return ru;
58 RRegUniverse__init(ru);
60 /* Add the registers. The initial segment of this array must be
61 those available for allocation by reg-alloc, and those that
62 follow are not available for allocation. */
63 ru->allocable_start[(mode64) ? HRcInt64 : HRcInt32] = ru->size;
64 ru->regs[ru->size++] = hregMIPS_GPR16(mode64);
65 ru->regs[ru->size++] = hregMIPS_GPR17(mode64);
66 ru->regs[ru->size++] = hregMIPS_GPR18(mode64);
67 ru->regs[ru->size++] = hregMIPS_GPR19(mode64);
68 ru->regs[ru->size++] = hregMIPS_GPR20(mode64);
69 ru->regs[ru->size++] = hregMIPS_GPR21(mode64);
70 ru->regs[ru->size++] = hregMIPS_GPR22(mode64);
72 ru->regs[ru->size++] = hregMIPS_GPR12(mode64);
73 ru->regs[ru->size++] = hregMIPS_GPR13(mode64);
74 ru->regs[ru->size++] = hregMIPS_GPR14(mode64);
75 ru->regs[ru->size++] = hregMIPS_GPR15(mode64);
76 ru->regs[ru->size++] = hregMIPS_GPR24(mode64);
77 ru->allocable_end[(mode64) ? HRcInt64 : HRcInt32] = ru->size - 1;
79 /* s7 (=guest_state) */
80 ru->allocable_start[(mode64) ? HRcFlt64 : HRcFlt32] = ru->size;
81 ru->regs[ru->size++] = hregMIPS_F16(mode64);
82 ru->regs[ru->size++] = hregMIPS_F18(mode64);
83 ru->regs[ru->size++] = hregMIPS_F20(mode64);
84 ru->regs[ru->size++] = hregMIPS_F22(mode64);
85 ru->regs[ru->size++] = hregMIPS_F24(mode64);
86 ru->regs[ru->size++] = hregMIPS_F26(mode64);
87 ru->regs[ru->size++] = hregMIPS_F28(mode64);
88 ru->regs[ru->size++] = hregMIPS_F30(mode64);
89 ru->allocable_end[(mode64) ? HRcFlt64 : HRcFlt32] = ru->size - 1;
91 ru->allocable_start[HRcVec128] = ru->size;
92 ru->regs[ru->size++] = hregMIPS_W16(mode64);
93 ru->regs[ru->size++] = hregMIPS_W17(mode64);
94 ru->regs[ru->size++] = hregMIPS_W18(mode64);
95 ru->regs[ru->size++] = hregMIPS_W19(mode64);
96 ru->regs[ru->size++] = hregMIPS_W20(mode64);
97 ru->regs[ru->size++] = hregMIPS_W21(mode64);
98 ru->regs[ru->size++] = hregMIPS_W22(mode64);
99 ru->regs[ru->size++] = hregMIPS_W23(mode64);
100 ru->regs[ru->size++] = hregMIPS_W24(mode64);
101 ru->regs[ru->size++] = hregMIPS_W25(mode64);
102 ru->regs[ru->size++] = hregMIPS_W26(mode64);
103 ru->regs[ru->size++] = hregMIPS_W27(mode64);
104 ru->regs[ru->size++] = hregMIPS_W28(mode64);
105 ru->regs[ru->size++] = hregMIPS_W29(mode64);
106 ru->regs[ru->size++] = hregMIPS_W30(mode64);
107 ru->regs[ru->size++] = hregMIPS_W31(mode64);
108 ru->allocable_end[HRcVec128] = ru->size - 1;
110 if (!mode64) {
111 /* Fake double floating point */
112 ru->allocable_start[HRcFlt64] = ru->size;
113 ru->regs[ru->size++] = hregMIPS_D0(mode64);
114 ru->regs[ru->size++] = hregMIPS_D1(mode64);
115 ru->regs[ru->size++] = hregMIPS_D2(mode64);
116 ru->regs[ru->size++] = hregMIPS_D3(mode64);
117 ru->regs[ru->size++] = hregMIPS_D4(mode64);
118 ru->regs[ru->size++] = hregMIPS_D5(mode64);
119 ru->regs[ru->size++] = hregMIPS_D6(mode64);
120 ru->regs[ru->size++] = hregMIPS_D7(mode64);
121 ru->allocable_end[HRcFlt64] = ru->size - 1;
124 ru->allocable = ru->size;
125 /* And other regs, not available to the allocator. */
127 ru->regs[ru->size++] = hregMIPS_HI(mode64);
128 ru->regs[ru->size++] = hregMIPS_LO(mode64);
129 ru->regs[ru->size++] = hregMIPS_GPR0(mode64);
130 ru->regs[ru->size++] = hregMIPS_GPR1(mode64);
131 ru->regs[ru->size++] = hregMIPS_GPR2(mode64);
132 ru->regs[ru->size++] = hregMIPS_GPR3(mode64);
133 ru->regs[ru->size++] = hregMIPS_GPR4(mode64);
134 ru->regs[ru->size++] = hregMIPS_GPR5(mode64);
135 ru->regs[ru->size++] = hregMIPS_GPR6(mode64);
136 ru->regs[ru->size++] = hregMIPS_GPR7(mode64);
137 ru->regs[ru->size++] = hregMIPS_GPR8(mode64);
138 ru->regs[ru->size++] = hregMIPS_GPR9(mode64);
139 ru->regs[ru->size++] = hregMIPS_GPR10(mode64);
140 ru->regs[ru->size++] = hregMIPS_GPR11(mode64);
141 ru->regs[ru->size++] = hregMIPS_GPR23(mode64);
142 ru->regs[ru->size++] = hregMIPS_GPR25(mode64);
143 ru->regs[ru->size++] = hregMIPS_GPR29(mode64);
144 ru->regs[ru->size++] = hregMIPS_GPR31(mode64);
146 rRegUniverse_MIPS_initted = howNeeded;
148 RRegUniverse__check_is_sane(ru);
149 return ru;
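/* Roughly, in 32-bit mode the universe built above ends up as:
     allocatable GPRs   $16..$22, $12..$15, $24
     allocatable FPRs   $f16, $f18, ..., $f30
     allocatable MSA    $w16..$w31
     allocatable "fake" doubles $d0..$d7
     non-allocatable    HI, LO, $0..$11, $23, $25, $29, $31
   with allocable_start[]/allocable_end[] bracketing each register class. */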
153 UInt ppHRegMIPS(HReg reg, Bool mode64)
155 Int r;
156 static const HChar *ireg32_names[35]
157 = { "$0", "$1", "$2", "$3", "$4", "$5", "$6", "$7",
158 "$8", "$9", "$10", "$11", "$12", "$13", "$14", "$15",
159 "$16", "$17", "$18", "$19", "$20", "$21", "$22", "$23",
160 "$24", "$25", "$26", "$27", "$28", "$29", "$30", "$31",
161 "%32", "%33", "%34",
164 static const HChar *freg32_names[32]
165 = { "$f0", "$f1", "$f2", "$f3", "$f4", "$f5", "$f6", "$f7",
166 "$f8", "$f9", "$f10", "$f11", "$f12", "$f13", "$f14", "$f15",
167 "$f16", "$f17", "$f18", "$f19", "$f20", "$f21", "$f22", "$f23",
168 "$f24", "$f25", "$f26", "$f27", "$f28", "$f29", "f30", "$f31"
171 static const HChar *freg64_names[32]
172 = { "$d0", "$d1", "$d2", "$d3", "$d4", "$d5", "$d6", "$d7",
173 "$d8", "$d9", "$d10", "$d11", "$d12", "$d13", "$d14", "$d15",
176 static const HChar *fvec128_names[32]
177 = { "$w0", "$w1", "$w2", "$w3", "$w4", "$w5", "$w6", "$w7",
178 "$w8", "$w9", "$w10", "$w11", "$w12", "$w13", "$w14", "$w15",
179 "$w16", "$w17", "$w18", "$w19", "$w20", "$w21", "$w22", "$w23",
180 "$w24", "$w24", "$w26", "$w27", "$w28", "$w29", "$w30", "$w31"
183 /* Be generic for all virtual regs. */
184 if (hregIsVirtual(reg)) {
185 return ppHReg(reg);
188 /* But specific for real regs. */
189 vassert(hregClass(reg) == HRcInt32 || hregClass(reg) == HRcInt64 ||
190 hregClass(reg) == HRcFlt32 || hregClass(reg) == HRcFlt64 ||
191 hregClass(reg) == HRcVec128);
193 /* But specific for real regs. */
194 switch (hregClass(reg)) {
195 case HRcInt32:
196 r = hregEncoding(reg);
197 vassert(r >= 0 && r < 32);
198 return vex_printf("%s", ireg32_names[r]);
199 case HRcInt64:
200 r = hregEncoding (reg);
201 vassert (r >= 0 && r < 32);
202 return vex_printf ("%s", ireg32_names[r]);
203 case HRcFlt32:
204 r = hregEncoding(reg);
205 vassert(r >= 0 && r < 32);
206 return vex_printf("%s", freg32_names[r]);
207 case HRcFlt64:
208 r = hregEncoding(reg);
209 vassert(r >= 0 && r < 32);
210 return vex_printf("%s", freg64_names[r]);
211 case HRcVec128:
212 r = hregEncoding(reg);
213 vassert(r >= 0 && r < 32);
214 return vex_printf("%s", fvec128_names[r]);
215 default:
216 vpanic("ppHRegMIPS");
217 break;
222 /*----------------- Condition Codes ----------------------*/
224 const HChar *showMIPSCondCode(MIPSCondCode cond)
226 const HChar* ret;
227 switch (cond) {
228 case MIPScc_EQ:
229 ret = "EQ"; /* equal */
230 break;
231 case MIPScc_NE:
232 ret = "NEQ"; /* not equal */
233 break;
234 case MIPScc_HS:
235 ret = "GE"; /* >=u (Greater Than or Equal) */
236 break;
237 case MIPScc_LO:
238 ret = "LT"; /* <u (lower) */
239 break;
240 case MIPScc_MI:
241 ret = "MI"; /* minus (negative) */
242 break;
243 case MIPScc_PL:
244 ret = "PL"; /* plus (zero or +ve) */
245 break;
246 case MIPScc_VS:
247 ret = "VS"; /* overflow */
248 break;
249 case MIPScc_VC:
250 ret = "VC"; /* no overflow */
251 break;
252 case MIPScc_HI:
253 ret = "HI"; /* >u (higher) */
254 break;
255 case MIPScc_LS:
256 ret = "LS"; /* <=u (lower or same) */
257 break;
258 case MIPScc_GE:
259 ret = "GE"; /* >=s (signed greater or equal) */
260 break;
261 case MIPScc_LT:
262 ret = "LT"; /* <s (signed less than) */
263 break;
264 case MIPScc_GT:
265 ret = "GT"; /* >s (signed greater) */
266 break;
267 case MIPScc_LE:
268 ret = "LE"; /* <=s (signed less or equal) */
269 break;
270 case MIPScc_AL:
271 ret = "AL"; /* always (unconditional) */
272 break;
273 case MIPScc_NV:
274 ret = "NV"; /* never (unconditional): */
275 break;
276 default:
277 vpanic("showMIPSCondCode");
278 break;
280 return ret;
283 const HChar *showMIPSFpOp(MIPSFpOp op)
285 const HChar *ret;
286 switch (op) {
287 case Mfp_ADDD:
288 ret = "add.d";
289 break;
290 case Mfp_SUBD:
291 ret = "sub.d";
292 break;
293 case Mfp_MULD:
294 ret = "mul.d";
295 break;
296 case Mfp_DIVD:
297 ret = "div.d";
298 break;
299 case Mfp_MADDD:
300 ret = "madd.d";
301 break;
302 case Mfp_MSUBD:
303 ret = "msub.d";
304 break;
305 case Mfp_MADDS:
306 ret = "madd.s";
307 break;
308 case Mfp_MSUBS:
309 ret = "msub.s";
310 break;
311 case Mfp_ADDS:
312 ret = "add.s";
313 break;
314 case Mfp_SUBS:
315 ret = "sub.s";
316 break;
317 case Mfp_MULS:
318 ret = "mul.s";
319 break;
320 case Mfp_DIVS:
321 ret = "div.s";
322 break;
323 case Mfp_SQRTS:
324 ret = "sqrt.s";
325 break;
326 case Mfp_SQRTD:
327 ret = "sqrt.d";
328 break;
329 case Mfp_ABSS:
330 ret = "abs.s";
331 break;
332 case Mfp_ABSD:
333 ret = "abs.d";
334 break;
335 case Mfp_NEGS:
336 ret = "neg.s";
337 break;
338 case Mfp_NEGD:
339 ret = "neg.d";
340 break;
341 case Mfp_MOVS:
342 ret = "mov.s";
343 break;
344 case Mfp_MOVD:
345 ret = "mov.d";
346 break;
347 case Mfp_ROUNDWS:
348 ret = "round.w.s";
349 break;
350 case Mfp_ROUNDWD:
351 ret = "round.w.d";
352 break;
353 case Mfp_ROUNDLD:
354 ret = "round.l.d";
355 break;
356 case Mfp_FLOORWS:
357 ret = "floor.w.s";
358 break;
359 case Mfp_FLOORWD:
360 ret = "floor.w.d";
361 break;
362 case Mfp_CVTDW:
363 ret = "cvt.d.w";
364 break;
365 case Mfp_CVTDL:
366 ret = "cvt.d.l";
367 break;
368 case Mfp_CVTDS:
369 ret = "cvt.d.s";
370 break;
371 case Mfp_CVTSD:
372 ret = "cvt.s.d";
373 break;
374 case Mfp_CVTSW:
375 ret = "cvt.s.w";
376 break;
377 case Mfp_CVTWS:
378 ret = "cvt.w.s";
379 break;
380 case Mfp_RINTS:
381 ret = "rint.s";
382 break;
383 case Mfp_RINTD:
384 ret = "rint.d";
385 break;
386 case Mfp_CVTWD:
387 ret = "cvt.w.d";
388 break;
389 case Mfp_CVTLD:
390 ret = "cvt.l.d";
391 break;
392 case Mfp_CVTLS:
393 ret = "cvt.l.s";
394 break;
395 case Mfp_TRUWD:
396 ret = "trunc.w.d";
397 break;
398 case Mfp_TRUWS:
399 ret = "trunc.w.s";
400 break;
401 case Mfp_TRULD:
402 ret = "trunc.l.d";
403 break;
404 case Mfp_TRULS:
405 ret = "trunc.l.s";
406 break;
407 case Mfp_CEILWS:
408 ret = "ceil.w.s";
409 break;
410 case Mfp_CEILWD:
411 ret = "ceil.w.d";
412 break;
413 case Mfp_CEILLS:
414 ret = "ceil.l.s";
415 break;
416 case Mfp_CEILLD:
417 ret = "ceil.l.d";
418 break;
419 #if (__mips_isa_rev >= 6)
420 case Mfp_CMP_UN:
421 ret = "cmp.un.d";
422 break;
423 case Mfp_CMP_EQ:
424 ret = "cmp.eq.d";
425 break;
426 case Mfp_CMP_LT:
427 ret = "cmp.lt.d";
428 break;
429 case Mfp_CMP_NGT:
430 ret = "cmp.ngt.d";
431 break;
432 case Mfp_CMP_UN_S:
433 ret = "cmp.un.s";
434 break;
435 case Mfp_CMP_EQ_S:
436 ret = "cmp.eq.s";
437 break;
438 case Mfp_CMP_LT_S:
439 ret = "cmp.lt.s";
440 break;
441 case Mfp_CMP_NGT_S:
442 ret = "cmp.ngt.s";
443 break;
444 case Mfp_MAXS:
445 ret = "max.s";
446 break;
447 case Mfp_MAXD:
448 ret = "max.d";
449 break;
450 case Mfp_MINS:
451 ret = "min.s";
452 break;
453 case Mfp_MIND:
454 ret = "min.d";
455 break;
456 #else
457 case Mfp_CMP_UN:
458 ret = "c.un.d";
459 break;
460 case Mfp_CMP_EQ:
461 ret = "c.eq.d";
462 break;
463 case Mfp_CMP_LT:
464 ret = "c.lt.d";
465 break;
466 case Mfp_CMP_NGT:
467 ret = "c.ngt.d";
468 break;
469 #endif
470 default:
471 vex_printf("Unknown op: %d", (Int)op);
472 vpanic("showMIPSFpOp");
473 break;
475 return ret;
478 /* Show a move between an FPR and a GPR. */
479 const HChar* showMIPSFpGpMoveOp ( MIPSFpGpMoveOp op )
481 const HChar *ret;
482 switch (op) {
483 case MFpGpMove_mfc1:
484 ret = "mfc1";
485 break;
486 case MFpGpMove_dmfc1:
487 ret = "dmfc1";
488 break;
489 case MFpGpMove_mtc1:
490 ret = "mtc1";
491 break;
492 case MFpGpMove_dmtc1:
493 ret = "dmtc1";
494 break;
495 default:
496 vpanic("showMIPSFpGpMoveOp");
497 break;
499 return ret;
502 /* Show a conditional-move operation (integer or floating point). */
503 const HChar* showMIPSMoveCondOp ( MIPSMoveCondOp op )
505 const HChar *ret;
506 switch (op) {
507 case MFpMoveCond_movns:
508 ret = "movn.s";
509 break;
510 case MFpMoveCond_movnd:
511 ret = "movn.d";
512 break;
513 case MMoveCond_movn:
514 ret = "movn";
515 break;
516 case MSeleqz:
517 ret = "seleqz";
518 break;
519 case MSelnez:
520 ret = "selnez";
521 break;
522 case MFpSels:
523 ret = "sel.s";
524 break;
525 case MFpSeld:
526 ret = "sel.d";
527 break;
528 default:
529 vpanic("showMIPSFpMoveCondOp");
530 break;
532 return ret;
535 /* --------- MIPSAMode: memory address expressions. --------- */
537 MIPSAMode *MIPSAMode_IR(Int idx, HReg base)
539 MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
540 am->tag = Mam_IR;
541 am->Mam.IR.base = base;
542 am->Mam.IR.index = idx;
544 return am;
547 MIPSAMode *MIPSAMode_RR(HReg idx, HReg base)
549 MIPSAMode *am = LibVEX_Alloc_inline(sizeof(MIPSAMode));
550 am->tag = Mam_RR;
551 am->Mam.RR.base = base;
552 am->Mam.RR.index = idx;
554 return am;
557 MIPSAMode *dopyMIPSAMode(MIPSAMode * am)
559 MIPSAMode* ret;
560 switch (am->tag) {
561 case Mam_IR:
562 ret = MIPSAMode_IR(am->Mam.IR.index, am->Mam.IR.base);
563 break;
564 case Mam_RR:
565 ret = MIPSAMode_RR(am->Mam.RR.index, am->Mam.RR.base);
566 break;
567 default:
568 vpanic("dopyMIPSAMode");
569 break;
571 return ret;
574 MIPSAMode *nextMIPSAModeFloat(MIPSAMode * am)
576 MIPSAMode* ret;
577 switch (am->tag) {
578 case Mam_IR:
579 ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
580 break;
581 case Mam_RR:
582 /* We can't do anything with the RR case, so if it appears
583 we simply have to give up. */
584 /* fallthrough */
585 default:
586 vpanic("nextMIPSAModeFloat");
587 break;
589 return ret;
592 MIPSAMode *nextMIPSAModeInt(MIPSAMode * am)
594 MIPSAMode* ret;
595 switch (am->tag) {
596 case Mam_IR:
597 ret = MIPSAMode_IR(am->Mam.IR.index + 4, am->Mam.IR.base);
598 break;
599 case Mam_RR:
600 /* We can't do anything with the RR case, so if it appears
601 we simply have to give up. */
602 /* fallthrough */
603 default:
604 vpanic("nextMIPSAModeInt");
605 break;
607 return ret;
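/* These "next" helpers advance an IR-form amode by 4 bytes, presumably so
   that a wider value can be accessed as two consecutive 4-byte pieces;
   RR-form amodes cannot be advanced this way, hence the panic. */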
610 void ppMIPSAMode(MIPSAMode * am, Bool mode64)
612 switch (am->tag) {
613 case Mam_IR:
614 if (am->Mam.IR.index == 0)
615 vex_printf("0(");
616 else
617 vex_printf("%d(", (Int) am->Mam.IR.index);
618 ppHRegMIPS(am->Mam.IR.base, mode64);
619 vex_printf(")");
620 return;
621 case Mam_RR:
622 ppHRegMIPS(am->Mam.RR.base, mode64);
623 vex_printf(", ");
624 ppHRegMIPS(am->Mam.RR.index, mode64);
625 return;
626 default:
627 vpanic("ppMIPSAMode");
628 break;
632 static void addRegUsage_MIPSAMode(HRegUsage * u, MIPSAMode * am)
634 switch (am->tag) {
635 case Mam_IR:
636 addHRegUse(u, HRmRead, am->Mam.IR.base);
637 return;
638 case Mam_RR:
639 addHRegUse(u, HRmRead, am->Mam.RR.base);
640 addHRegUse(u, HRmRead, am->Mam.RR.index);
641 return;
642 default:
643 vpanic("addRegUsage_MIPSAMode");
644 break;
648 static void mapRegs_MIPSAMode(HRegRemap * m, MIPSAMode * am)
650 switch (am->tag) {
651 case Mam_IR:
652 am->Mam.IR.base = lookupHRegRemap(m, am->Mam.IR.base);
653 return;
654 case Mam_RR:
655 am->Mam.RR.base = lookupHRegRemap(m, am->Mam.RR.base);
656 am->Mam.RR.index = lookupHRegRemap(m, am->Mam.RR.index);
657 return;
658 default:
659 vpanic("mapRegs_MIPSAMode");
660 break;
664 /* --------- Operand, which can be a reg or a u16/s16. --------- */
666 MIPSRH *MIPSRH_Imm(Bool syned, UShort imm16)
668 MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
669 op->tag = Mrh_Imm;
670 op->Mrh.Imm.syned = syned;
671 op->Mrh.Imm.imm16 = imm16;
672 /* If this is a signed value, ensure it's not -32768, so that we
673 are guaranteed always to be able to negate if needed. */
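   /* (In 16-bit two's complement the negation of -32768 is not
      representable: it wraps back to -32768, hence the exclusion.) */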
674 if (syned)
675 vassert(imm16 != 0x8000);
676 vassert(syned == True || syned == False);
677 return op;
680 MIPSRH *MIPSRH_Reg(HReg reg)
682 MIPSRH *op = LibVEX_Alloc_inline(sizeof(MIPSRH));
683 op->tag = Mrh_Reg;
684 op->Mrh.Reg.reg = reg;
685 return op;
688 void ppMIPSRH(MIPSRH * op, Bool mode64)
690 MIPSRHTag tag = op->tag;
691 switch (tag) {
692 case Mrh_Imm:
693 if (op->Mrh.Imm.syned)
694 vex_printf("%d", (Int) (Short) op->Mrh.Imm.imm16);
695 else
696 vex_printf("%u", (UInt) (UShort) op->Mrh.Imm.imm16);
697 return;
698 case Mrh_Reg:
699 ppHRegMIPS(op->Mrh.Reg.reg, mode64);
700 return;
701 default:
702 vpanic("ppMIPSRH");
703 break;
707 /* An MIPSRH can only be used in a "read" context (what would it mean
708 to write or modify a literal?) and so we enumerate its registers
709 accordingly. */
710 static void addRegUsage_MIPSRH(HRegUsage * u, MIPSRH * op)
712 switch (op->tag) {
713 case Mrh_Imm:
714 return;
715 case Mrh_Reg:
716 addHRegUse(u, HRmRead, op->Mrh.Reg.reg);
717 return;
718 default:
719 vpanic("addRegUsage_MIPSRH");
720 break;
724 static void mapRegs_MIPSRH(HRegRemap * m, MIPSRH * op)
726 switch (op->tag) {
727 case Mrh_Imm:
728 return;
729 case Mrh_Reg:
730 op->Mrh.Reg.reg = lookupHRegRemap(m, op->Mrh.Reg.reg);
731 return;
732 default:
733 vpanic("mapRegs_MIPSRH");
734 break;
738 /* --------- Instructions. --------- */
740 const HChar *showMIPSUnaryOp(MIPSUnaryOp op)
742 const HChar* ret;
743 switch (op) {
744 case Mun_CLO:
745 ret = "clo";
746 break;
747 case Mun_CLZ:
748 ret = "clz";
749 break;
750 case Mun_NOP:
751 ret = "nop";
752 break;
753 case Mun_DCLO:
754 ret = "dclo";
755 break;
756 case Mun_DCLZ:
757 ret = "dclz";
758 break;
759 default:
760 vpanic("showMIPSUnaryOp");
761 break;
763 return ret;
766 const HChar *showMIPSAluOp(MIPSAluOp op, Bool immR)
768 const HChar* ret;
769 switch (op) {
770 case Malu_ADD:
771 ret = immR ? "addiu" : "addu";
772 break;
773 case Malu_SUB:
774 ret = "subu";
775 break;
776 case Malu_AND:
777 ret = immR ? "andi" : "and";
778 break;
779 case Malu_OR:
780 ret = immR ? "ori" : "or";
781 break;
782 case Malu_NOR:
783 vassert(immR == False); /* there is no nor with an immediate operand */
784 ret = "nor";
785 break;
786 case Malu_XOR:
787 ret = immR ? "xori" : "xor";
788 break;
789 case Malu_DADD:
790 ret = immR ? "daddiu" : "dadd";
791 break;
792 case Malu_DSUB:
793 ret = immR ? "dsubi" : "dsub";
794 break;
795 case Malu_SLT:
796 ret = immR ? "slti" : "slt";
797 break;
798 default:
799 vpanic("showMIPSAluOp");
800 break;
802 return ret;
805 const HChar *showMIPSShftOp(MIPSShftOp op, Bool immR, Bool sz32)
807 const HChar *ret;
808 switch (op) {
809 case Mshft_SRA:
810 ret = immR ? (sz32 ? "sra" : "dsra") : (sz32 ? "srav" : "dsrav");
811 break;
812 case Mshft_SLL:
813 ret = immR ? (sz32 ? "sll" : "dsll") : (sz32 ? "sllv" : "dsllv");
814 break;
815 case Mshft_SRL:
816 ret = immR ? (sz32 ? "srl" : "dsrl") : (sz32 ? "srlv" : "dsrlv");
817 break;
818 default:
819 vpanic("showMIPSShftOp");
820 break;
822 return ret;
825 const HChar *showMIPSMaccOp(MIPSMaccOp op, Bool variable)
827 const HChar *ret;
828 switch (op) {
829 case Macc_ADD:
830 ret = variable ? "madd" : "maddu";
831 break;
832 case Macc_SUB:
833 ret = variable ? "msub" : "msubu";
834 break;
835 default:
836 vpanic("showMIPSAccOp");
837 break;
839 return ret;
842 HChar showMsaDF(MSADF df) {
843 switch (df) {
844 case MSA_B:
845 return 'b';
847 case MSA_H:
848 return 'h';
850 case MSA_W:
851 return 'w';
853 case MSA_D:
854 return 'd';
857 return '?';
860 HChar showMsaDFF(MSADFFlx df, int op) {
861 switch (df) {
862 case MSA_F_DW:
863 if (op == MSA_MUL_Q || op == MSA_MULR_Q || op == MSA_FEXDO) return 'w';
864 else return 'd';
866 case MSA_F_WH:
867 if (op == MSA_MUL_Q || op == MSA_MULR_Q || op == MSA_FEXDO) return 'h';
868 else return 'w';
871 return '?';
874 const HChar *showMsaMI10op(MSAMI10Op op) {
875 const HChar *ret;
877 switch (op) {
878 case MSA_LD:
879 ret = "ld";
880 break;
882 case MSA_ST:
883 ret = "st";
884 break;
886 default:
887 vpanic("showMsaMI10op");
888 break;
891 return ret;
894 const HChar *showMsaElmOp(MSAELMOp op) {
895 const HChar *ret;
897 switch (op) {
898 case MSA_MOVE:
899 ret = "move.v";
900 break;
902 case MSA_INSERT:
903 ret = "insert";
904 break;
906 case MSA_COPY_U:
907 ret = "copy_u";
908 break;
910 case MSA_COPY_S:
911 ret = "copy_s";
912 break;
914 case MSA_SLDI:
915 ret = "sldi";
916 break;
918 case MSA_INSVE:
919 ret = "insve";
920 break;
922 case MSA_CFCMSA:
923 ret = "cfcmsa";
924 break;
926 case MSA_CTCMSA:
927 ret = "ctcmsa";
928 break;
930 default:
931 vpanic("showMsaElmOp");
932 break;
935 return ret;
938 const HChar *showMsa2ROp(MSA2ROp op) {
939 const HChar *ret;
941 switch (op) {
942 case MSA_NLZC:
943 ret = "nlzc";
944 break;
946 case MSA_NLOC:
947 ret = "nloc";
948 break;
950 case MSA_FILL:
951 ret = "fill";
952 break;
954 case MSA_PCNT:
955 ret = "pcnt";
956 break;
958 default:
959 vpanic("showMsa2ROp");
960 break;
963 return ret;
966 const HChar *showRotxOp(MIPSRotxOp op) {
967 const HChar *ret;
968 switch(op) {
969 case Rotx32:
970 ret = "rotx32";
971 break;
972 case Rotx64:
973 ret = "rotx64";
974 break;
975 default:
976 vpanic("showRotxOp");
977 break;
980 return ret;
983 const HChar *showMsa2RFOp(MSA2RFOp op) {
984 const HChar *ret;
986 switch (op) {
987 case MSA_FTRUNC_S:
988 ret = "ftrunc_s";
989 break;
991 case MSA_FTRUNC_U:
992 ret = "ftrunc_u";
993 break;
995 case MSA_FFINT_S:
996 ret = "ffint_s";
997 break;
999 case MSA_FFINT_U:
1000 ret = "ffint_u";
1001 break;
1003 case MSA_FSQRT:
1004 ret = "fsqrt";
1005 break;
1007 case MSA_FRSQRT:
1008 ret = "frsqrt";
1009 break;
1011 case MSA_FRCP:
1012 ret = "frcp";
1013 break;
1015 case MSA_FEXUPR:
1016 ret = "fexupr";
1017 break;
1019 case MSA_FTINT_U:
1020 ret = "ftint_u";
1021 break;
1023 case MSA_FTINT_S:
1024 ret = "ftint_s";
1025 break;
1027 case MSA_FLOG2:
1028 ret = "flog2";
1029 break;
1031 default:
1032 vpanic("showMsa2RFOp");
1033 break;
1036 return ret;
1039 const HChar *showMsa3ROp(MSA3ROp op) {
1040 const HChar *ret;
1042 switch (op) {
1043 case MSA_ADDV:
1044 ret = "addv";
1045 break;
1047 case MSA_ADD_A:
1048 ret = "add_a";
1049 break;
1051 case MSA_SUBV:
1052 ret = "subv";
1053 break;
1055 case MSA_ADDS_S:
1056 ret = "adds_s";
1057 break;
1059 case MSA_ADDS_U:
1060 ret = "adds_u";
1061 break;
1063 case MSA_SUBS_S:
1064 ret = "subs_s";
1065 break;
1067 case MSA_SUBS_U:
1068 ret = "subs_u";
1069 break;
1071 case MSA_MAX_S:
1072 ret = "max_s";
1073 break;
1075 case MSA_MAX_U:
1076 ret = "max_u";
1077 break;
1079 case MSA_MIN_S:
1080 ret = "min_s";
1081 break;
1083 case MSA_MIN_U:
1084 ret = "min_u";
1085 break;
1087 case MSA_SLL:
1088 ret = "sll";
1089 break;
1091 case MSA_SRL:
1092 ret = "srl";
1093 break;
1095 case MSA_SRA:
1096 ret = "sra";
1097 break;
1099 case MSA_CEQ:
1100 ret = "ceq";
1101 break;
1103 case MSA_CLT_S:
1104 ret = "clt_s";
1105 break;
1107 case MSA_CLT_U:
1108 ret = "clt_u";
1109 break;
1111 case MSA_ILVL:
1112 ret = "ilvl";
1113 break;
1115 case MSA_ILVR:
1116 ret = "ilvr";
1117 break;
1119 case MSA_ILVEV:
1120 ret = "ilvev";
1121 break;
1123 case MSA_ILVOD:
1124 ret = "ilvod";
1125 break;
1127 case MSA_PCKEV:
1128 ret = "ilvev";
1129 break;
1131 case MSA_PCKOD:
1132 ret = "ilvod";
1133 break;
1135 case MSA_AVER_S:
1136 ret = "aver_s";
1137 break;
1139 case MSA_AVER_U:
1140 ret = "aver_u";
1141 break;
1143 case MSA_SLD:
1144 ret = "sld";
1145 break;
1147 case MSA_SPLAT:
1148 ret = "splat";
1149 break;
1151 case MSA_MULV:
1152 ret = "mulv";
1153 break;
1155 case MSA_DIVS:
1156 ret = "divs";
1157 break;
1159 case MSA_DIVU:
1160 ret = "divu";
1161 break;
1163 case MSA_VSHF:
1164 ret = "vshf";
1165 break;
1167 default:
1168 vpanic("showMsa3ROp");
1169 break;
1172 return ret;
1175 const HChar *showMsaVecOp(MSAVECOp op) {
1176 const HChar *ret;
1178 switch (op) {
1179 case MSA_ANDV:
1180 ret = "and.v";
1181 break;
1183 case MSA_ORV:
1184 ret = "or.v";
1185 break;
1187 case MSA_XORV:
1188 ret = "xor.v";
1189 break;
1191 case MSA_NORV:
1192 ret = "nor.v";
1193 break;
1195 default:
1196 vpanic("showMsaVecOp");
1197 break;
1200 return ret;
1203 const HChar *showMsaBitOp(MSABITOp op) {
1204 const HChar *ret;
1206 switch (op) {
1207 case MSA_SLLI:
1208 ret = "slli";
1209 break;
1211 case MSA_SRAI:
1212 ret = "srai";
1213 break;
1215 case MSA_SRLI:
1216 ret = "srli";
1217 break;
1219 case MSA_SAT_S:
1220 ret = "sat_s";
1221 break;
1223 case MSA_SRARI:
1224 ret = "srari";
1225 break;
1227 default:
1228 vpanic("showMsaBitOp");
1229 break;
1232 return ret;
1235 const HChar *showMsa3RFOp(MSA3RFOp op) {
1236 const HChar *ret;
1238 switch (op) {
1239 case MSA_FADD:
1240 ret = "fadd";
1241 break;
1243 case MSA_FSUB:
1244 ret = "fsub";
1245 break;
1247 case MSA_FMUL:
1248 ret = "fmul";
1249 break;
1251 case MSA_FDIV:
1252 ret = "fdiv";
1253 break;
1255 case MSA_MUL_Q:
1256 ret = "mul_q";
1257 break;
1259 case MSA_MULR_Q:
1260 ret = "mulr_q";
1261 break;
1263 case MSA_FCEQ:
1264 ret = "fceq";
1265 break;
1267 case MSA_FCLT:
1268 ret = "fclt";
1269 break;
1271 case MSA_FCUN:
1272 ret = "fcun";
1273 break;
1275 case MSA_FEXP2:
1276 ret = "fexp2";
1277 break;
1279 case MSA_FMIN:
1280 ret = "fmin";
1281 break;
1283 case MSA_FMIN_A:
1284 ret = "fmin_a";
1285 break;
1287 case MSA_FMAX:
1288 ret = "fmax";
1289 break;
1291 case MSA_FMADD:
1292 ret = "fmadd";
1293 break;
1295 case MSA_FMSUB:
1296 ret = "fmsub";
1297 break;
1299 case MSA_FEXDO:
1300 ret = "fexdo";
1301 break;
1303 case MSA_FTQ:
1304 ret = "ftq";
1305 break;
1307 case MSA_FCLE:
1308 ret = "fcle";
1309 break;
1311 default:
1312 vpanic("showMsa3RFOp");
1313 break;
1316 return ret;
1319 MIPSInstr *MIPSInstr_LI(HReg dst, ULong imm)
1321 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1322 i->tag = Min_LI;
1323 i->Min.LI.dst = dst;
1324 i->Min.LI.imm = imm;
1325 return i;
1328 MIPSInstr *MIPSInstr_Alu(MIPSAluOp op, HReg dst, HReg srcL, MIPSRH * srcR)
1330 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1331 i->tag = Min_Alu;
1332 i->Min.Alu.op = op;
1333 i->Min.Alu.dst = dst;
1334 i->Min.Alu.srcL = srcL;
1335 i->Min.Alu.srcR = srcR;
1336 return i;
1339 MIPSInstr *MIPSInstr_Shft(MIPSShftOp op, Bool sz32, HReg dst, HReg srcL,
1340 MIPSRH * srcR)
1342 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1343 i->tag = Min_Shft;
1344 i->Min.Shft.op = op;
1345 i->Min.Shft.sz32 = sz32;
1346 i->Min.Shft.dst = dst;
1347 i->Min.Shft.srcL = srcL;
1348 i->Min.Shft.srcR = srcR;
1349 return i;
1352 MIPSInstr *MIPSInstr_Unary(MIPSUnaryOp op, HReg dst, HReg src)
1354 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1355 i->tag = Min_Unary;
1356 i->Min.Unary.op = op;
1357 i->Min.Unary.dst = dst;
1358 i->Min.Unary.src = src;
1359 return i;
1362 MIPSInstr *MIPSInstr_Cmp(Bool syned, Bool sz32, HReg dst, HReg srcL, HReg srcR,
1363 MIPSCondCode cond)
1365 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1366 i->tag = Min_Cmp;
1367 i->Min.Cmp.syned = syned;
1368 i->Min.Cmp.sz32 = sz32;
1369 i->Min.Cmp.dst = dst;
1370 i->Min.Cmp.srcL = srcL;
1371 i->Min.Cmp.srcR = srcR;
1372 i->Min.Cmp.cond = cond;
1373 return i;
1376 /* mul */
1377 MIPSInstr *MIPSInstr_Mul(HReg dst, HReg srcL, HReg srcR)
1379 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1380 i->tag = Min_Mul;
1381 i->Min.Mul.dst = dst;
1382 i->Min.Mul.srcL = srcL;
1383 i->Min.Mul.srcR = srcR;
1384 return i;
1387 /* mult, multu / dmult, dmultu */
1388 MIPSInstr *MIPSInstr_Mult(Bool syned, HReg srcL, HReg srcR)
1390 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1391 i->tag = Min_Mult;
1392 i->Min.Mult.syned = syned;
1393 i->Min.Mult.srcL = srcL;
1394 i->Min.Mult.srcR = srcR;
1395 return i;
1398 /* ext / dext, dextm, dextu */
1399 MIPSInstr *MIPSInstr_Ext(HReg dst, HReg src, UInt pos, UInt size)
1401 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1402 i->tag = Min_Ext;
1403 i->Min.Ext.dst = dst;
1404 i->Min.Ext.src = src;
1405 i->Min.Ext.pos = pos;
1406 i->Min.Ext.size = size;
1407 return i;
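/* The pos/size fields are expected to satisfy pos < 32, 0 < size <= 32 and
   pos + size <= 63, as asserted in ppMIPSInstr's Min_Ext case. */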
1410 MIPSInstr *MIPSInstr_Mulr6(Bool syned, Bool sz32, Bool low, HReg dst,
1411 HReg srcL, HReg srcR)
1413 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1414 i->tag = Min_Mulr6;
1415 i->Min.Mulr6.syned = syned;
1416 i->Min.Mulr6.sz32 = sz32; /* True = 32 bits */
1417 i->Min.Mulr6.low = low;
1418 i->Min.Mulr6.dst = dst;
1419 i->Min.Mulr6.srcL = srcL;
1420 i->Min.Mulr6.srcR = srcR;
1421 return i;
1424 /* msub */
1425 MIPSInstr *MIPSInstr_Msub(Bool syned, HReg srcL, HReg srcR)
1427 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1428 i->tag = Min_Macc;
1430 i->Min.Macc.op = Macc_SUB;
1431 i->Min.Macc.syned = syned;
1432 i->Min.Macc.srcL = srcL;
1433 i->Min.Macc.srcR = srcR;
1434 return i;
1437 /* madd */
1438 MIPSInstr *MIPSInstr_Madd(Bool syned, HReg srcL, HReg srcR)
1440 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1441 i->tag = Min_Macc;
1443 i->Min.Macc.op = Macc_ADD;
1444 i->Min.Macc.syned = syned;
1445 i->Min.Macc.srcL = srcL;
1446 i->Min.Macc.srcR = srcR;
1447 return i;
1450 /* div */
1451 MIPSInstr *MIPSInstr_Div(Bool syned, Bool sz32, HReg srcL, HReg srcR)
1453 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1454 i->tag = Min_Div;
1455 i->Min.Div.syned = syned;
1456 i->Min.Div.sz32 = sz32; /* True = 32 bits */
1457 i->Min.Div.srcL = srcL;
1458 i->Min.Div.srcR = srcR;
1459 return i;
1462 MIPSInstr *MIPSInstr_Divr6(Bool syned, Bool sz32, Bool mod, HReg dst,
1463 HReg srcL, HReg srcR)
1465 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1466 i->tag = Min_Divr6;
1467 i->Min.Divr6.syned = syned;
1468 i->Min.Divr6.sz32 = sz32; /* True = 32 bits */
1469 i->Min.Divr6.mod = mod;
1470 i->Min.Divr6.dst = dst;
1471 i->Min.Divr6.srcL = srcL;
1472 i->Min.Divr6.srcR = srcR;
1473 return i;
1476 MIPSInstr *MIPSInstr_Call ( MIPSCondCode cond, Addr64 target, UInt argiregs,
1477 HReg src, RetLoc rloc )
1479 UInt mask;
1480 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1481 i->tag = Min_Call;
1482 i->Min.Call.cond = cond;
1483 i->Min.Call.target = target;
1484 i->Min.Call.argiregs = argiregs;
1485 i->Min.Call.src = src;
1486 i->Min.Call.rloc = rloc;
1487 /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
1488 mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1489 | (1 << 10) | (1 << 11);
1490 vassert(0 == (argiregs & ~mask));
1491 vassert(is_sane_RetLoc(rloc));
1492 return i;
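/* Example: a helper call passing two integer arguments in $4 and $5 would
   be created with argiregs == (1 << 4) | (1 << 5), which satisfies the
   mask check above. */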
1495 MIPSInstr *MIPSInstr_CallAlways ( MIPSCondCode cond, Addr64 target,
1496 UInt argiregs, RetLoc rloc )
1498 UInt mask;
1499 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1500 i->tag = Min_Call;
1501 i->Min.Call.cond = cond;
1502 i->Min.Call.target = target;
1503 i->Min.Call.argiregs = argiregs;
1504 i->Min.Call.rloc = rloc;
1505 /* Only $4 .. $7/$11 inclusive may be used as arg regs. */
1506 mask = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7) | (1 << 8) | (1 << 9)
1507 | (1 << 10) | (1 << 11);
1508 vassert(0 == (argiregs & ~mask));
1509 vassert(is_sane_RetLoc(rloc));
1510 return i;
1513 MIPSInstr *MIPSInstr_XDirect ( Addr64 dstGA, MIPSAMode* amPC,
1514 MIPSCondCode cond, Bool toFastEP ) {
1515 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1516 i->tag = Min_XDirect;
1517 i->Min.XDirect.dstGA = dstGA;
1518 i->Min.XDirect.amPC = amPC;
1519 i->Min.XDirect.cond = cond;
1520 i->Min.XDirect.toFastEP = toFastEP;
1521 return i;
1524 MIPSInstr *MIPSInstr_XIndir ( HReg dstGA, MIPSAMode* amPC,
1525 MIPSCondCode cond ) {
1526 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1527 i->tag = Min_XIndir;
1528 i->Min.XIndir.dstGA = dstGA;
1529 i->Min.XIndir.amPC = amPC;
1530 i->Min.XIndir.cond = cond;
1531 return i;
1534 MIPSInstr *MIPSInstr_XAssisted ( HReg dstGA, MIPSAMode* amPC,
1535 MIPSCondCode cond, IRJumpKind jk ) {
1536 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1537 i->tag = Min_XAssisted;
1538 i->Min.XAssisted.dstGA = dstGA;
1539 i->Min.XAssisted.amPC = amPC;
1540 i->Min.XAssisted.cond = cond;
1541 i->Min.XAssisted.jk = jk;
1542 return i;
1545 MIPSInstr *MIPSInstr_Load(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
1547 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1548 i->tag = Min_Load;
1549 i->Min.Load.sz = sz;
1550 i->Min.Load.src = src;
1551 i->Min.Load.dst = dst;
1552 vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
1554 if (sz == 8)
1555 vassert(mode64);
1556 return i;
1559 MIPSInstr *MIPSInstr_Store(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
1561 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1562 i->tag = Min_Store;
1563 i->Min.Store.sz = sz;
1564 i->Min.Store.src = src;
1565 i->Min.Store.dst = dst;
1566 vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
1568 if (sz == 8)
1569 vassert(mode64);
1570 return i;
1573 MIPSInstr *MIPSInstr_LoadL(UChar sz, HReg dst, MIPSAMode * src, Bool mode64)
1575 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1576 i->tag = Min_LoadL;
1577 i->Min.LoadL.sz = sz;
1578 i->Min.LoadL.src = src;
1579 i->Min.LoadL.dst = dst;
1580 vassert(sz == 4 || sz == 8);
1582 if (sz == 8)
1583 vassert(mode64);
1584 return i;
1587 MIPSInstr *MIPSInstr_Cas(UChar sz, HReg old, HReg addr,
1588 HReg expd, HReg data, Bool mode64)
1590 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1591 i->tag = Min_Cas;
1592 i->Min.Cas.sz = sz;
1593 i->Min.Cas.old = old;
1594 i->Min.Cas.addr = addr;
1595 i->Min.Cas.expd = expd;
1596 i->Min.Cas.data = data;
1597 vassert(sz == 1 || sz == 2 || sz == 4 || sz == 8);
1599 if (sz == 8)
1600 vassert(mode64);
1601 return i;
1604 MIPSInstr *MIPSInstr_StoreC(UChar sz, MIPSAMode * dst, HReg src, Bool mode64)
1606 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1607 i->tag = Min_StoreC;
1608 i->Min.StoreC.sz = sz;
1609 i->Min.StoreC.src = src;
1610 i->Min.StoreC.dst = dst;
1611 vassert(sz == 4 || sz == 8);
1613 if (sz == 8)
1614 vassert(mode64);
1615 return i;
1618 MIPSInstr *MIPSInstr_Mthi(HReg src)
1620 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1621 i->tag = Min_Mthi;
1622 i->Min.MtHL.src = src;
1623 return i;
1626 MIPSInstr *MIPSInstr_Mtlo(HReg src)
1628 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1629 i->tag = Min_Mtlo;
1630 i->Min.MtHL.src = src;
1631 return i;
1634 MIPSInstr *MIPSInstr_Mfhi(HReg dst)
1636 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1637 i->tag = Min_Mfhi;
1638 i->Min.MfHL.dst = dst;
1639 return i;
1642 MIPSInstr *MIPSInstr_Mflo(HReg dst)
1644 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1645 i->tag = Min_Mflo;
1646 i->Min.MfHL.dst = dst;
1647 return i;
1650 /* Read/Write Link Register */
1651 MIPSInstr *MIPSInstr_RdWrLR(Bool wrLR, HReg gpr)
1653 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1654 i->tag = Min_RdWrLR;
1655 i->Min.RdWrLR.wrLR = wrLR;
1656 i->Min.RdWrLR.gpr = gpr;
1657 return i;
1660 MIPSInstr *MIPSInstr_FpLdSt(Bool isLoad, UChar sz, HReg reg, MIPSAMode * addr)
1662 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1663 i->tag = Min_FpLdSt;
1664 i->Min.FpLdSt.isLoad = isLoad;
1665 i->Min.FpLdSt.sz = sz;
1666 i->Min.FpLdSt.reg = reg;
1667 i->Min.FpLdSt.addr = addr;
1668 vassert(sz == 4 || sz == 8);
1669 return i;
1672 MIPSInstr *MIPSInstr_FpUnary(MIPSFpOp op, HReg dst, HReg src)
1674 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1675 i->tag = Min_FpUnary;
1676 i->Min.FpUnary.op = op;
1677 i->Min.FpUnary.dst = dst;
1678 i->Min.FpUnary.src = src;
1679 return i;
1682 MIPSInstr *MIPSInstr_FpBinary(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
1684 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1685 i->tag = Min_FpBinary;
1686 i->Min.FpBinary.op = op;
1687 i->Min.FpBinary.dst = dst;
1688 i->Min.FpBinary.srcL = srcL;
1689 i->Min.FpBinary.srcR = srcR;
1690 return i;
1693 MIPSInstr *MIPSInstr_FpTernary ( MIPSFpOp op, HReg dst, HReg src1, HReg src2,
1694 HReg src3 )
1696 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1697 i->tag = Min_FpTernary;
1698 i->Min.FpTernary.op = op;
1699 i->Min.FpTernary.dst = dst;
1700 i->Min.FpTernary.src1 = src1;
1701 i->Min.FpTernary.src2 = src2;
1702 i->Min.FpTernary.src3 = src3;
1703 return i;
1706 MIPSInstr *MIPSInstr_FpConvert(MIPSFpOp op, HReg dst, HReg src)
1708 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1709 i->tag = Min_FpConvert;
1710 i->Min.FpConvert.op = op;
1711 i->Min.FpConvert.dst = dst;
1712 i->Min.FpConvert.src = src;
1713 return i;
1717 MIPSInstr *MIPSInstr_FpCompare(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
1719 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1720 i->tag = Min_FpCompare;
1721 i->Min.FpCompare.op = op;
1722 i->Min.FpCompare.dst = dst;
1723 i->Min.FpCompare.srcL = srcL;
1724 i->Min.FpCompare.srcR = srcR;
1725 return i;
1728 MIPSInstr *MIPSInstr_FpMinMax(MIPSFpOp op, HReg dst, HReg srcL, HReg srcR)
1730 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1731 i->tag = Min_FpMinMax;
1732 i->Min.FpMinMax.op = op;
1733 i->Min.FpMinMax.dst = dst;
1734 i->Min.FpMinMax.srcL = srcL;
1735 i->Min.FpMinMax.srcR = srcR;
1736 return i;
1740 MIPSInstr *MIPSInstr_MtFCSR(HReg src)
1742 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1743 i->tag = Min_MtFCSR;
1744 i->Min.MtFCSR.src = src;
1745 return i;
1748 MIPSInstr *MIPSInstr_MfFCSR(HReg dst)
1750 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1751 i->tag = Min_MfFCSR;
1752 i->Min.MfFCSR.dst = dst;
1753 return i;
1756 MIPSInstr *MIPSInstr_FpGpMove ( MIPSFpGpMoveOp op, HReg dst, HReg src )
1758 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1759 i->tag = Min_FpGpMove;
1760 i->Min.FpGpMove.op = op;
1761 i->Min.FpGpMove.dst = dst;
1762 i->Min.FpGpMove.src = src;
1763 return i;
1766 MIPSInstr *MIPSInstr_MoveCond ( MIPSMoveCondOp op, HReg dst, HReg src,
1767 HReg cond )
1769 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1770 i->tag = Min_MoveCond;
1771 i->Min.MoveCond.op = op;
1772 i->Min.MoveCond.dst = dst;
1773 i->Min.MoveCond.src = src;
1774 i->Min.MoveCond.cond = cond;
1775 return i;
1778 MIPSInstr *MIPSInstr_EvCheck ( MIPSAMode* amCounter,
1779 MIPSAMode* amFailAddr ) {
1780 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1781 i->tag = Min_EvCheck;
1782 i->Min.EvCheck.amCounter = amCounter;
1783 i->Min.EvCheck.amFailAddr = amFailAddr;
1784 return i;
1787 MIPSInstr* MIPSInstr_ProfInc ( void ) {
1788 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1789 i->tag = Min_ProfInc;
1790 return i;
1794 MIPSInstr* MIPSInstr_MsaMi10(MSAMI10Op op, UInt s10, HReg rs, HReg wd,
1795 MSADF df) {
1796 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1797 i->tag = Msa_MI10;
1798 i->Min.MsaMi10.op = op;
1799 i->Min.MsaMi10.s10 = s10;
1800 i->Min.MsaMi10.rs = rs;
1801 i->Min.MsaMi10.wd = wd;
1802 i->Min.MsaMi10.df = df;
1803 return i;
1806 MIPSInstr* MIPSInstr_MsaElm(MSAELMOp op, HReg ws, HReg wd, UInt dfn ) {
1807 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1808 i->tag = Msa_ELM;
1809 i->Min.MsaElm.op = op;
1810 i->Min.MsaElm.ws = ws;
1811 i->Min.MsaElm.wd = wd;
1812 i->Min.MsaElm.dfn = dfn;
1813 return i;
1816 MIPSInstr* MIPSInstr_Msa2R(MSA2ROp op, MSADF df, HReg ws, HReg wd ) {
1817 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1818 i->tag = Msa_2R;
1819 i->Min.Msa2R.op = op;
1820 i->Min.Msa2R.df = df;
1821 i->Min.Msa2R.ws = ws;
1822 i->Min.Msa2R.wd = wd;
1823 return i;
1826 MIPSInstr* MIPSInstr_Msa3R(MSA3ROp op, MSADF df, HReg wd, HReg ws, HReg wt) {
1827 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1828 i->tag = Msa_3R;
1829 i->Min.Msa3R.op = op;
1830 i->Min.Msa3R.df = df;
1831 i->Min.Msa3R.wd = wd;
1832 i->Min.Msa3R.wt = wt;
1833 i->Min.Msa3R.ws = ws;
1834 return i;
1837 MIPSInstr* MIPSInstr_MsaVec(MSAVECOp op, HReg wd, HReg ws, HReg wt) {
1838 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1839 i->tag = Msa_VEC;
1840 i->Min.MsaVec.op = op;
1841 i->Min.MsaVec.wd = wd;
1842 i->Min.MsaVec.wt = wt;
1843 i->Min.MsaVec.ws = ws;
1844 return i;
1847 MIPSInstr* MIPSInstr_MsaBit(MSABITOp op, MSADF df, UChar ms, HReg ws, HReg wd) {
1848 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1849 i->tag = Msa_BIT;
1850 i->Min.MsaBit.op = op;
1851 i->Min.MsaBit.df = df;
1852 i->Min.MsaBit.ws = ws;
1853 i->Min.MsaBit.wd = wd;
1854 i->Min.MsaBit.ms = ms;
1855 return i;
1858 MIPSInstr* MIPSInstr_Msa3RF(MSA3RFOp op, MSADFFlx df, HReg wd, HReg ws,
1859 HReg wt) {
1860 MIPSInstr* i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1861 i->tag = Msa_3RF;
1862 i->Min.Msa3RF.op = op;
1863 i->Min.Msa3RF.df = df;
1864 i->Min.Msa3RF.wd = wd;
1865 i->Min.Msa3RF.wt = wt;
1866 i->Min.Msa3RF.ws = ws;
1867 return i;
1870 MIPSInstr* MIPSInstr_Msa2RF(MSA2RFOp op, MSADFFlx df, HReg wd, HReg ws) {
1871 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1872 i->tag = Msa_2RF;
1873 i->Min.Msa2RF.op = op;
1874 i->Min.Msa2RF.df = df;
1875 i->Min.Msa2RF.wd = wd;
1876 i->Min.Msa2RF.ws = ws;
1877 return i;
1880 MIPSInstr* MIPSInstr_Bitswap(MIPSRotxOp op, HReg rd, HReg rt, HReg shift, HReg shiftx, HReg stripe) {
1881 MIPSInstr *i = LibVEX_Alloc_inline(sizeof(MIPSInstr));
1882 i->tag = Min_Rotx;
1883 i->Min.Rotx.op = op;
1884 i->Min.Rotx.rd = rd;
1885 i->Min.Rotx.rt = rt;
1886 i->Min.Rotx.shift = shift;
1887 i->Min.Rotx.shiftx = shiftx;
1888 i->Min.Rotx.stripe = stripe;
1889 return i;
1892 /* -------- Pretty Print instructions ------------- */
1893 static void ppLoadImm(HReg dst, ULong imm, Bool mode64)
1895 vex_printf("li ");
1896 ppHRegMIPS(dst, mode64);
1897 vex_printf(",0x%016llx", imm);
1900 static void MSAdfn(UInt dfn, MSADF* df, UInt* n) {
1901 if ((dfn & 0x3e) == MSA_DFN_D) {
1902 *df = MSA_D;
1903 *n = dfn & 1;
1904 return;
1907 if ((dfn & 0x3c) == MSA_DFN_W) {
1908 *df = MSA_W;
1909 *n = dfn & 3;
1910 return;
1913 if ((dfn & 0x38) == MSA_DFN_H) {
1914 *df = MSA_H;
1915 *n = dfn & 7;
1916 return;
1919 *df = MSA_B;
1920 *n = dfn & 3;
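/* Example: a dfn value whose upper bits match MSA_DFN_W decodes to
   df = MSA_W with the element index n taken from the low two bits, so
   (MSA_DFN_W | 2) would presumably denote element 2 of a vector of words. */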
1923 void ppMIPSInstr(const MIPSInstr * i, Bool mode64)
1925 switch (i->tag) {
1926 case Min_LI:
1927 ppLoadImm(i->Min.LI.dst, i->Min.LI.imm, mode64);
1928 break;
1929 case Min_Alu: {
1930 HReg r_srcL = i->Min.Alu.srcL;
1931 MIPSRH *rh_srcR = i->Min.Alu.srcR;
1932 /* generic */
1933 vex_printf("%s ", showMIPSAluOp(i->Min.Alu.op,
1934 toBool(rh_srcR->tag == Mrh_Imm)));
1935 ppHRegMIPS(i->Min.Alu.dst, mode64);
1936 vex_printf(",");
1937 ppHRegMIPS(r_srcL, mode64);
1938 vex_printf(",");
1939 ppMIPSRH(rh_srcR, mode64);
1940 return;
1942 case Min_Shft: {
1943 HReg r_srcL = i->Min.Shft.srcL;
1944 MIPSRH *rh_srcR = i->Min.Shft.srcR;
1945 vex_printf("%s ", showMIPSShftOp(i->Min.Shft.op,
1946 toBool(rh_srcR->tag == Mrh_Imm),
1947 i->Min.Shft.sz32));
1948 ppHRegMIPS(i->Min.Shft.dst, mode64);
1949 vex_printf(",");
1950 ppHRegMIPS(r_srcL, mode64);
1951 vex_printf(",");
1952 ppMIPSRH(rh_srcR, mode64);
1953 return;
1955 case Min_Rotx: {
1956 HReg r_src = i->Min.Rotx.rt;
1957 vex_printf("rotx ");
1958 ppHRegMIPS(i->Min.Rotx.rd, mode64);
1959 vex_printf(",");
1960 ppHRegMIPS(r_src, mode64);
1961 return;
1963 case Min_Unary: {
1964 vex_printf("%s ", showMIPSUnaryOp(i->Min.Unary.op));
1965 ppHRegMIPS(i->Min.Unary.dst, mode64);
1966 vex_printf(",");
1967 ppHRegMIPS(i->Min.Unary.src, mode64);
1968 return;
1970 case Min_Cmp: {
1971 vex_printf("word_compare ");
1972 ppHRegMIPS(i->Min.Cmp.dst, mode64);
1973 vex_printf(" = %s ( ", showMIPSCondCode(i->Min.Cmp.cond));
1974 ppHRegMIPS(i->Min.Cmp.srcL, mode64);
1975 vex_printf(", ");
1976 ppHRegMIPS(i->Min.Cmp.srcR, mode64);
1977 vex_printf(" )");
1979 return;
1981 case Min_Mul: {
1982 vex_printf("mul ");
1983 ppHRegMIPS(i->Min.Mul.dst, mode64);
1984 vex_printf(", ");
1985 ppHRegMIPS(i->Min.Mul.srcL, mode64);
1986 vex_printf(", ");
1987 ppHRegMIPS(i->Min.Mul.srcR, mode64);
1988 return;
1990 case Min_Mult: {
1991 vex_printf("%s%s ", mode64 ? "dmult" : "mult",
1992 i->Min.Mult.syned ? "" : "u");
1993 ppHRegMIPS(i->Min.Mult.srcL, mode64);
1994 vex_printf(", ");
1995 ppHRegMIPS(i->Min.Mult.srcR, mode64);
1996 return;
1998 case Min_Ext: {
1999 vassert(mode64);
2000 vassert(i->Min.Ext.pos < 32);
2001 vassert(i->Min.Ext.size > 0);
2002 vassert(i->Min.Ext.size <= 32);
2003 vassert(i->Min.Ext.size + i->Min.Ext.pos > 0);
2004 vassert(i->Min.Ext.size + i->Min.Ext.pos <= 63);
2005 vex_printf("dext ");
2006 ppHRegMIPS(i->Min.Ext.dst, mode64);
2007 vex_printf(", ");
2008 ppHRegMIPS(i->Min.Ext.src, mode64);
2009 vex_printf(", %u, %u", i->Min.Ext.pos, i->Min.Ext.size);
2010 return;
2012 case Min_Mulr6: {
2013 if(i->Min.Mulr6.sz32) {
2014 if(i->Min.Mulr6.low)vex_printf("mul");
2015 else vex_printf("muh");
2016 if(i->Min.Mulr6.syned)vex_printf(" ");
2017 else vex_printf("u ");
2018 } else {
2019 if(i->Min.Mulr6.low)
2020 vex_printf("%s%s ", "dmul",
2021 i->Min.Mulr6.syned ? "" : "u");
2022 else
2023 vex_printf("%s%s ","dmuh",
2024 i->Min.Mulr6.syned ? "" : "u");
2026 ppHRegMIPS(i->Min.Mulr6.dst, mode64);
2027 vex_printf(", ");
2028 ppHRegMIPS(i->Min.Mulr6.srcL, mode64);
2029 vex_printf(", ");
2030 ppHRegMIPS(i->Min.Mulr6.srcR, mode64);
2031 break;
2033 case Min_Mthi: {
2034 vex_printf("mthi ");
2035 ppHRegMIPS(i->Min.MtHL.src, mode64);
2036 return;
2038 case Min_Mtlo: {
2039 vex_printf("mtlo ");
2040 ppHRegMIPS(i->Min.MtHL.src, mode64);
2041 return;
2043 case Min_Mfhi: {
2044 vex_printf("mfhi ");
2045 ppHRegMIPS(i->Min.MfHL.dst, mode64);
2046 return;
2048 case Min_Mflo: {
2049 vex_printf("mflo ");
2050 ppHRegMIPS(i->Min.MfHL.dst, mode64);
2051 return;
2053 case Min_Macc: {
2054 vex_printf("%s ", showMIPSMaccOp(i->Min.Macc.op, i->Min.Macc.syned));
2055 ppHRegMIPS(i->Min.Macc.srcL, mode64);
2056 vex_printf(", ");
2057 ppHRegMIPS(i->Min.Macc.srcR, mode64);
2058 return;
2060 case Min_Div: {
2061 if (!i->Min.Div.sz32)
2062 vex_printf("d");
2063 vex_printf("div");
2064 vex_printf("%s ", i->Min.Div.syned ? "s" : "u");
2065 ppHRegMIPS(i->Min.Div.srcL, mode64);
2066 vex_printf(", ");
2067 ppHRegMIPS(i->Min.Div.srcR, mode64);
2068 return;
2070 case Min_Divr6: {
2071 if(i->Min.Divr6.sz32) {
2072 if(i->Min.Divr6.mod)vex_printf("mod");
2073 else vex_printf("div");
2074 if(i->Min.Divr6.syned)vex_printf(" ");
2075 else vex_printf("u ");
2076 } else {
2077 if(i->Min.Divr6.mod)
2078 vex_printf("%s%s ", "dmod",
2079 i->Min.Divr6.syned ? "" : "u");
2080 else
2081 vex_printf("%s%s ","ddiv",
2082 i->Min.Divr6.syned ? "" : "u");
2084 ppHRegMIPS(i->Min.Divr6.dst, mode64);
2085 vex_printf(", ");
2086 ppHRegMIPS(i->Min.Divr6.srcL, mode64);
2087 vex_printf(", ");
2088 ppHRegMIPS(i->Min.Divr6.srcR, mode64);
2089 break;
2091 case Min_Call: {
2092 Int n;
2093 vex_printf("call: ");
2094 if (i->Min.Call.cond != MIPScc_AL) {
2095 vex_printf("if (%s) ", showMIPSCondCode(i->Min.Call.cond));
2097 vex_printf(" {");
2098 if (!mode64)
2099 vex_printf(" addiu $29, $29, -16");
2101 ppLoadImm(hregMIPS_GPR25(mode64), i->Min.Call.target, mode64);
2103 vex_printf(" ; jarl $31, $25; # args [");
2104 for (n = 0; n < 32; n++) {
2105 if (i->Min.Call.argiregs & (1 << n)) {
2106 vex_printf("$%d", n);
2107 if ((i->Min.Call.argiregs >> n) > 1)
2108 vex_printf(",");
2111 vex_printf("] nop; ");
2112 if (!mode64)
2113 vex_printf("addiu $29, $29, 16; ]");
2115 break;
2117 case Min_XDirect:
2118 vex_printf("(xDirect) ");
2119 vex_printf("if (guest_COND.%s) { ",
2120 showMIPSCondCode(i->Min.XDirect.cond));
2121 vex_printf("move $9, 0x%x,", (UInt)i->Min.XDirect.dstGA);
2122 vex_printf("; sw $9, ");
2123 ppMIPSAMode(i->Min.XDirect.amPC, mode64);
2124 vex_printf("; move $9, $disp_cp_chain_me_to_%sEP; jalr $9; nop}",
2125 i->Min.XDirect.toFastEP ? "fast" : "slow");
2126 return;
2127 case Min_XIndir:
2128 vex_printf("(xIndir) ");
2129 vex_printf("if (guest_COND.%s) { sw ",
2130 showMIPSCondCode(i->Min.XIndir.cond));
2131 ppHRegMIPS(i->Min.XIndir.dstGA, mode64);
2132 vex_printf(", ");
2133 ppMIPSAMode(i->Min.XIndir.amPC, mode64);
2134 vex_printf("; move $9, $disp_indir; jalr $9; nop}");
2135 return;
2136 case Min_XAssisted:
2137 vex_printf("(xAssisted) ");
2138 vex_printf("if (guest_COND.%s) { ",
2139 showMIPSCondCode(i->Min.XAssisted.cond));
2140 vex_printf("sw ");
2141 ppHRegMIPS(i->Min.XAssisted.dstGA, mode64);
2142 vex_printf(", ");
2143 ppMIPSAMode(i->Min.XAssisted.amPC, mode64);
2144 vex_printf("; move $9, $IRJumpKind_to_TRCVAL(%d)",
2145 (Int)i->Min.XAssisted.jk);
2146 vex_printf("; move $9, $disp_assisted; jalr $9; nop; }");
2147 return;
2148 case Min_Load: {
2149 Bool idxd = toBool(i->Min.Load.src->tag == Mam_RR);
2150 UChar sz = i->Min.Load.sz;
2151 HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
2152 vex_printf("l%c%s ", c_sz, idxd ? "x" : "");
2153 ppHRegMIPS(i->Min.Load.dst, mode64);
2154 vex_printf(",");
2155 ppMIPSAMode(i->Min.Load.src, mode64);
2156 return;
2158 case Min_Store: {
2159 UChar sz = i->Min.Store.sz;
2160 Bool idxd = toBool(i->Min.Store.dst->tag == Mam_RR);
2161 HChar c_sz = sz == 1 ? 'b' : sz == 2 ? 'h' : sz == 4 ? 'w' : 'd';
2162 vex_printf("s%c%s ", c_sz, idxd ? "x" : "");
2163 ppHRegMIPS(i->Min.Store.src, mode64);
2164 vex_printf(",");
2165 ppMIPSAMode(i->Min.Store.dst, mode64);
2166 return;
2168 case Min_LoadL: {
2169 vex_printf("ll ");
2170 ppHRegMIPS(i->Min.LoadL.dst, mode64);
2171 vex_printf(",");
2172 ppMIPSAMode(i->Min.LoadL.src, mode64);
2173 return;
2175 case Min_Cas: {
2176 Bool sz8 = toBool(i->Min.Cas.sz == 8);
2177 /*
2178 * ll(d) old, 0(addr)
2179 * bne old, expd, end
2180 * nop
2181 * (d)addiu old, old, 1
2182 * sc(d) data, 0(addr)
2183 * movn old, expd, data
2184 * end:
2185 */
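/* The intent of the sequence appears to be: if the loaded value differs
   from expd, the branch exits with old holding the observed (non-matching)
   value; otherwise old is bumped to expd+1 before the sc, and the final
   movn resets it to expd only when the sc succeeded (data != 0).  So on
   return, old == expd signals a successful CAS. */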
2186 // ll(d) old, 0(addr)
2187 vex_printf("cas: ");
2189 vex_printf("%s ", sz8 ? "lld" : "ll");
2190 ppHRegMIPS(i->Min.Cas.old , mode64);
2191 vex_printf(", 0(");
2192 ppHRegMIPS(i->Min.Cas.addr , mode64);
2193 vex_printf(")\n");
2195 vex_printf("bne ");
2196 ppHRegMIPS(i->Min.Cas.old , mode64);
2197 vex_printf(", ");
2198 ppHRegMIPS(i->Min.Cas.expd , mode64);
2199 vex_printf(", end\n");
2201 vex_printf("nop\n");
2203 vex_printf("%s ", sz8 ? "daddiu" : "addiu");
2204 ppHRegMIPS(i->Min.Cas.old , mode64);
2205 vex_printf(", ");
2206 ppHRegMIPS(i->Min.Cas.old , mode64);
2207 vex_printf(", 1\n");
2209 vex_printf("%s ", sz8 ? "scd" : "sc");
2210 ppHRegMIPS(i->Min.Cas.data , mode64);
2211 vex_printf(", 0(");
2212 ppHRegMIPS(i->Min.Cas.addr , mode64);
2213 vex_printf(")\n");
2215 vex_printf("movn ");
2216 ppHRegMIPS(i->Min.Cas.old , mode64);
2217 vex_printf(", ");
2218 ppHRegMIPS(i->Min.Cas.expd , mode64);
2219 vex_printf(", ");
2220 ppHRegMIPS(i->Min.Cas.data , mode64);
2221 vex_printf("\nend:");
2222 return;
2224 case Min_StoreC: {
2225 vex_printf("sc ");
2226 ppHRegMIPS(i->Min.StoreC.src, mode64);
2227 vex_printf(",");
2228 ppMIPSAMode(i->Min.StoreC.dst, mode64);
2229 return;
2231 case Min_RdWrLR: {
2232 vex_printf("%s ", i->Min.RdWrLR.wrLR ? "mtlr" : "mflr");
2233 ppHRegMIPS(i->Min.RdWrLR.gpr, mode64);
2234 return;
2236 case Min_FpUnary:
2237 vex_printf("%s ", showMIPSFpOp(i->Min.FpUnary.op));
2238 ppHRegMIPS(i->Min.FpUnary.dst, mode64);
2239 vex_printf(",");
2240 ppHRegMIPS(i->Min.FpUnary.src, mode64);
2241 return;
2242 case Min_FpBinary:
2243 vex_printf("%s", showMIPSFpOp(i->Min.FpBinary.op));
2244 ppHRegMIPS(i->Min.FpBinary.dst, mode64);
2245 vex_printf(",");
2246 ppHRegMIPS(i->Min.FpBinary.srcL, mode64);
2247 vex_printf(",");
2248 ppHRegMIPS(i->Min.FpBinary.srcR, mode64);
2249 return;
2250 case Min_FpTernary:
2251 vex_printf("%s", showMIPSFpOp(i->Min.FpTernary.op));
2252 ppHRegMIPS(i->Min.FpTernary.dst, mode64);
2253 vex_printf(",");
2254 ppHRegMIPS(i->Min.FpTernary.src1, mode64);
2255 vex_printf(",");
2256 ppHRegMIPS(i->Min.FpTernary.src2, mode64);
2257 vex_printf(",");
2258 ppHRegMIPS(i->Min.FpTernary.src3, mode64);
2259 return;
2260 case Min_FpConvert:
2261 vex_printf("%s", showMIPSFpOp(i->Min.FpConvert.op));
2262 ppHRegMIPS(i->Min.FpConvert.dst, mode64);
2263 vex_printf(",");
2264 ppHRegMIPS(i->Min.FpConvert.src, mode64);
2265 return;
2266 case Min_FpCompare:
2267 vex_printf("%s ", showMIPSFpOp(i->Min.FpCompare.op));
2268 ppHRegMIPS(i->Min.FpCompare.srcL, mode64);
2269 vex_printf(",");
2270 ppHRegMIPS(i->Min.FpCompare.srcR, mode64);
2271 return;
2272 case Min_FpMinMax:
2273 vex_printf("%s ", showMIPSFpOp(i->Min.FpMinMax.op));
2274 ppHRegMIPS(i->Min.FpMinMax.srcL, mode64);
2275 vex_printf(",");
2276 ppHRegMIPS(i->Min.FpMinMax.srcR, mode64);
2277 return;
2278 case Min_FpMulAcc:
2279 vex_printf("%s ", showMIPSFpOp(i->Min.FpMulAcc.op));
2280 ppHRegMIPS(i->Min.FpMulAcc.dst, mode64);
2281 vex_printf(",");
2282 ppHRegMIPS(i->Min.FpMulAcc.srcML, mode64);
2283 vex_printf(",");
2284 ppHRegMIPS(i->Min.FpMulAcc.srcMR, mode64);
2285 vex_printf(",");
2286 ppHRegMIPS(i->Min.FpMulAcc.srcAcc, mode64);
2287 return;
2288 case Min_FpLdSt: {
2289 if (i->Min.FpLdSt.sz == 4) {
2290 if (i->Min.FpLdSt.isLoad) {
2291 vex_printf("lwc1 ");
2292 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
2293 vex_printf(",");
2294 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
2295 } else {
2296 vex_printf("swc1 ");
2297 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
2298 vex_printf(",");
2299 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
2301 } else if (i->Min.FpLdSt.sz == 8) {
2302 if (i->Min.FpLdSt.isLoad) {
2303 vex_printf("ldc1 ");
2304 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
2305 vex_printf(",");
2306 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
2307 } else {
2308 vex_printf("sdc1 ");
2309 ppHRegMIPS(i->Min.FpLdSt.reg, mode64);
2310 vex_printf(",");
2311 ppMIPSAMode(i->Min.FpLdSt.addr, mode64);
2314 return;
2316 case Min_MtFCSR: {
2317 vex_printf("ctc1 ");
2318 ppHRegMIPS(i->Min.MtFCSR.src, mode64);
2319 vex_printf(", $31");
2320 return;
2322 case Min_MfFCSR: {
2323 vex_printf("cfc1 ");
2324 ppHRegMIPS(i->Min.MfFCSR.dst, mode64);
2325 vex_printf(", $31");
2326 return;
2328 case Min_FpGpMove: {
2329 vex_printf("%s ", showMIPSFpGpMoveOp(i->Min.FpGpMove.op));
2330 ppHRegMIPS(i->Min.FpGpMove.dst, mode64);
2331 vex_printf(", ");
2332 ppHRegMIPS(i->Min.FpGpMove.src, mode64);
2333 return;
2335 case Min_MoveCond: {
2336 vex_printf("%s", showMIPSMoveCondOp(i->Min.MoveCond.op));
2337 ppHRegMIPS(i->Min.MoveCond.dst, mode64);
2338 vex_printf(", ");
2339 ppHRegMIPS(i->Min.MoveCond.src, mode64);
2340 vex_printf(", ");
2341 ppHRegMIPS(i->Min.MoveCond.cond, mode64);
2342 return;
2344 case Min_EvCheck:
2345 vex_printf("(evCheck) lw $9, ");
2346 ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
2347 vex_printf("; addiu $9, $9, -1");
2348 vex_printf("; sw $9, ");
2349 ppMIPSAMode(i->Min.EvCheck.amCounter, mode64);
2350 vex_printf("; bgez $t9, nofail; jalr *");
2351 ppMIPSAMode(i->Min.EvCheck.amFailAddr, mode64);
2352 vex_printf("; nofail:");
2353 return;
2354 case Min_ProfInc:
2355 if (mode64)
2356 vex_printf("(profInc) move $9, ($NotKnownYet); "
2357 "ld $8, 0($9); "
2358 "daddiu $8, $8, 1; "
2359 "sd $8, 0($9); " );
2360 else
2361 vex_printf("(profInc) move $9, ($NotKnownYet); "
2362 "lw $8, 0($9); "
2363 "addiu $8, $8, 1; "
2364 "sw $8, 0($9); "
2365 "sltiu $1, $8, 1; "
2366 "lw $8, 4($9); "
2367 "addu $8, $8, $1; "
2368 "sw $8, 4($9); " );
2369 return;
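/* In the 32-bit variant, "sltiu $1, $8, 1" sets $1 to 1 exactly when the
   incremented low word wrapped to zero, i.e. it computes the carry that is
   then added into the high word of the 64-bit counter. */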
2370 case Msa_MI10: {
2371 Int imm = (i->Min.MsaMi10.s10 << 22) >> 22;
2373 switch (i->Min.MsaMi10.df) {
2374 case MSA_B:
2375 break;
2377 case MSA_H:
2378 imm <<= 1;
2379 break;
2381 case MSA_W:
2382 imm <<= 2;
2383 break;
2385 case MSA_D:
2386 imm <<= 3;
2387 break;
2390 vex_printf("%s.%c ", showMsaMI10op(i->Min.MsaMi10.op),
2391 showMsaDF(i->Min.MsaMi10.df));
2392 ppHRegMIPS(i->Min.MsaMi10.wd, mode64);
2393 vex_printf(", (%d)", imm);
2394 ppHRegMIPS(i->Min.MsaMi10.rs, mode64);
2395 return;
2398 case Msa_ELM:
2399 switch (i->Min.MsaElm.op) {
2400 case MSA_MOVE:
2401 vex_printf("move.v ");
2402 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2403 vex_printf(", ");
2404 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2405 break;
2407 case MSA_SLDI: {
2408 MSADF df;
2409 UInt n;
2410 MSAdfn(i->Min.MsaElm.dfn, &df, &n);
2411 vex_printf("%s.%c ", showMsaElmOp(i->Min.MsaElm.op),
2412 showMsaDF(df));
2413 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2414 vex_printf(", ");
2415 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2416 vex_printf("[%u]", n);
2417 break;
2420 case MSA_INSVE: {
2421 MSADF df;
2422 UInt n;
2423 MSAdfn(i->Min.MsaElm.dfn, &df, &n);
2424 vex_printf("%s.%c ", showMsaElmOp(i->Min.MsaElm.op),
2425 showMsaDF(df));
2426 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2427 vex_printf("[%u], ", n);
2428 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2429 vex_printf("[0]");
2430 break;
2433 case MSA_COPY_S:
2434 case MSA_COPY_U: {
2435 MSADF df;
2436 UInt n;
2437 MSAdfn(i->Min.MsaElm.dfn, &df, &n);
2438 vex_printf("%s.%c ", showMsaElmOp(i->Min.MsaElm.op),
2439 showMsaDF(df));
2440 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2441 vex_printf(", ");
2442 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2443 vex_printf("[%u]", n);
2444 break;
2447 case MSA_INSERT: {
2448 MSADF df;
2449 UInt n;
2450 MSAdfn(i->Min.MsaElm.dfn, &df, &n);
2451 vex_printf("%s.%c ", showMsaElmOp(i->Min.MsaElm.op),
2452 showMsaDF(df));
2453 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2454 vex_printf("[%u], ", n);
2455 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2456 break;
2459 case MSA_CFCMSA:
2460 vex_printf("cfcmsa ");
2461 ppHRegMIPS(i->Min.MsaElm.wd, mode64);
2462 vex_printf(", $1");
2463 break;
2465 case MSA_CTCMSA:
2466 vex_printf("ctcmsa $1, ");
2467 ppHRegMIPS(i->Min.MsaElm.ws, mode64);
2468 break;
2471 return;
2473 case Msa_3R:
2474 vex_printf("%s.%c ",
2475 showMsa3ROp(i->Min.Msa3R.op), showMsaDF(i->Min.Msa3R.df));
2476 ppHRegMIPS(i->Min.Msa3R.wd, mode64);
2477 vex_printf(", ");
2478 ppHRegMIPS(i->Min.Msa3R.ws, mode64);
2479 vex_printf(", ");
2480 ppHRegMIPS(i->Min.Msa3R.wt, mode64);
2481 return;
2483 case Msa_2R:
2484 vex_printf("%s.%c ",
2485 showMsa2ROp(i->Min.Msa2R.op), showMsaDF(i->Min.Msa2R.df));
2486 ppHRegMIPS(i->Min.Msa2R.wd, mode64);
2487 vex_printf(", ");
2488 ppHRegMIPS(i->Min.Msa2R.ws, mode64);
2489 return;
2491 case Msa_VEC:
2492 vex_printf("%s ", showMsaVecOp(i->Min.MsaVec.op));
2493 ppHRegMIPS(i->Min.MsaVec.wd, mode64);
2494 vex_printf(", ");
2495 ppHRegMIPS(i->Min.MsaVec.ws, mode64);
2496 vex_printf(", ");
2497 ppHRegMIPS(i->Min.MsaVec.wt, mode64);
2498 return;
2500 case Msa_BIT:
2501 vex_printf("%s.%c ", showMsaBitOp(i->Min.MsaBit.op),
2502 showMsaDF(i->Min.MsaBit.df));
2503 ppHRegMIPS(i->Min.MsaBit.wd, mode64);
2504 vex_printf(", ");
2505 ppHRegMIPS(i->Min.MsaBit.ws, mode64);
2506 vex_printf(", %d ", i->Min.MsaBit.ms);
2507 return;
2509 case Msa_3RF:
2510 vex_printf("%s.%c ", showMsa3RFOp(i->Min.Msa3RF.op),
2511 showMsaDFF(i->Min.Msa3RF.df, i->Min.Msa3RF.op));
2512 ppHRegMIPS(i->Min.Msa3RF.wd, mode64);
2513 vex_printf(", ");
2514 ppHRegMIPS(i->Min.Msa3RF.ws, mode64);
2515 vex_printf(", ");
2516 ppHRegMIPS(i->Min.Msa3RF.wt, mode64);
2517 return;
2519 case Msa_2RF:
2520 vex_printf("%s.%c ", showMsa2RFOp(i->Min.Msa2RF.op),
2521 showMsaDFF(i->Min.Msa2RF.df, i->Min.Msa2RF.op));
2522 ppHRegMIPS(i->Min.Msa2RF.wd, mode64);
2523 vex_printf(", ");
2524 ppHRegMIPS(i->Min.Msa2RF.ws, mode64);
2525 return;
2526 default:
2527 vpanic("ppMIPSInstr");
2528 break;
2532 /* --------- Helpers for register allocation. --------- */
2534 void getRegUsage_MIPSInstr(HRegUsage * u, const MIPSInstr * i, Bool mode64)
2536 initHRegUsage(u);
2537 switch (i->tag) {
2538 case Min_LI:
2539 addHRegUse(u, HRmWrite, i->Min.LI.dst);
2540 break;
2541 case Min_Alu:
2542 addHRegUse(u, HRmRead, i->Min.Alu.srcL);
2543 addRegUsage_MIPSRH(u, i->Min.Alu.srcR);
2544 addHRegUse(u, HRmWrite, i->Min.Alu.dst);
2546 /* or Rd,Rs,Rs == move Rd,Rs */
2547 if ((i->Min.Alu.op == Malu_OR)
2548 && (i->Min.Alu.srcR->tag == Mrh_Reg)
2549 && sameHReg(i->Min.Alu.srcR->Mrh.Reg.reg, i->Min.Alu.srcL)) {
2550 u->isRegRegMove = True;
2551 u->regMoveSrc = i->Min.Alu.srcL;
2552 u->regMoveDst = i->Min.Alu.dst;
2554 return;
2555 case Min_Shft:
2556 addHRegUse(u, HRmRead, i->Min.Shft.srcL);
2557 addRegUsage_MIPSRH(u, i->Min.Shft.srcR);
2558 addHRegUse(u, HRmWrite, i->Min.Shft.dst);
2559 return;
2560 case Min_Rotx:
2561 addHRegUse(u, HRmRead, i->Min.Rotx.rt);
2562 addHRegUse(u, HRmWrite, i->Min.Rotx.rd);
2563 return;
2564 case Min_Cmp:
2565 addHRegUse(u, HRmRead, i->Min.Cmp.srcL);
2566 addHRegUse(u, HRmRead, i->Min.Cmp.srcR);
2567 addHRegUse(u, HRmWrite, i->Min.Cmp.dst);
2568 return;
2569 case Min_Unary:
2570 addHRegUse(u, HRmRead, i->Min.Unary.src);
2571 addHRegUse(u, HRmWrite, i->Min.Unary.dst);
2572 return;
2573 case Min_Mul:
2574 addHRegUse(u, HRmWrite, i->Min.Mul.dst);
2575 addHRegUse(u, HRmRead, i->Min.Mul.srcL);
2576 addHRegUse(u, HRmRead, i->Min.Mul.srcR);
2577 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
2578 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
2579 return;
2580 case Min_Mult:
2581 addHRegUse(u, HRmRead, i->Min.Mult.srcL);
2582 addHRegUse(u, HRmRead, i->Min.Mult.srcR);
2583 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
2584 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
2585 return;
2586 case Min_Ext:
2587 addHRegUse(u, HRmWrite, i->Min.Ext.dst);
2588 addHRegUse(u, HRmRead, i->Min.Ext.src);
2589 return;
2590 case Min_Mulr6:
2591 addHRegUse(u, HRmWrite, i->Min.Mulr6.dst);
2592 addHRegUse(u, HRmRead, i->Min.Mulr6.srcL);
2593 addHRegUse(u, HRmRead, i->Min.Mulr6.srcR);
2594 return;
2595 case Min_Mthi:
2596 case Min_Mtlo:
2597 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
2598 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
2599 addHRegUse(u, HRmRead, i->Min.MtHL.src);
2600 return;
2601 case Min_Mfhi:
2602 case Min_Mflo:
2603 addHRegUse(u, HRmRead, hregMIPS_HI(mode64));
2604 addHRegUse(u, HRmRead, hregMIPS_LO(mode64));
2605 addHRegUse(u, HRmWrite, i->Min.MfHL.dst);
2606 return;
2607 case Msa_MI10:
2608 addHRegUse(u, HRmRead, i->Min.MsaMi10.rs);
2610 switch (i->Min.MsaMi10.op) {
2611 case MSA_LD:
2612 addHRegUse(u, HRmWrite, i->Min.MsaMi10.wd);
2613 break;
2615 case MSA_ST:
2616 addHRegUse(u, HRmRead, i->Min.MsaMi10.wd);
2617 break;
2620 return;
2622 case Msa_ELM:
2623 if (LIKELY(i->Min.MsaElm.op != MSA_CFCMSA))
2624 addHRegUse(u, HRmRead, i->Min.MsaElm.ws);
2626 switch (i->Min.MsaElm.op) {
2627 case MSA_COPY_S:
2628 case MSA_COPY_U:
2629 case MSA_MOVE:
2630 case MSA_CFCMSA:
2631 addHRegUse(u, HRmWrite, i->Min.MsaElm.wd);
2632 break;
2634 case MSA_SLDI:
2635 case MSA_INSERT:
2636 case MSA_INSVE:
2637 addHRegUse(u, HRmModify, i->Min.MsaElm.wd);
2638 break;
2639 case MSA_CTCMSA:
2640 break;
2643 return;
2645 case Msa_3R:
2646 addHRegUse(u, HRmRead, i->Min.Msa3R.ws);
2647 addHRegUse(u, HRmRead, i->Min.Msa3R.wt);
2649 if (i->Min.Msa3R.op == MSA_SLD ||
2650 i->Min.Msa3R.op == MSA_VSHF) {
2651 addHRegUse(u, HRmModify, i->Min.Msa3R.wd);
2652 } else {
2653 addHRegUse(u, HRmWrite, i->Min.Msa3R.wd);
2656 return;
2658 case Msa_2R:
2659 addHRegUse(u, HRmWrite, i->Min.Msa2R.wd);
2660 addHRegUse(u, HRmRead, i->Min.Msa2R.ws);
2661 return;
2663 case Msa_VEC:
2664 addHRegUse(u, HRmRead, i->Min.MsaVec.ws);
2665 addHRegUse(u, HRmRead, i->Min.MsaVec.wt);
2666 addHRegUse(u, HRmWrite, i->Min.MsaVec.wd);
2667 return;
2669 case Msa_BIT:
2670 addHRegUse(u, HRmRead, i->Min.MsaBit.ws);
2671 addHRegUse(u, HRmWrite, i->Min.MsaBit.wd);
2672 return;
2674 case Msa_3RF:
2675 addHRegUse(u, HRmRead, i->Min.Msa3RF.ws);
2676 addHRegUse(u, HRmRead, i->Min.Msa3RF.wt);
2677 addHRegUse(u, HRmWrite, i->Min.Msa3RF.wd);
2678 return;
2680 case Msa_2RF:
2681 addHRegUse(u, HRmRead, i->Min.Msa2RF.ws);
2682 addHRegUse(u, HRmWrite, i->Min.Msa2RF.wd);
2683 return;
2685 case Min_MtFCSR:
2686 addHRegUse(u, HRmRead, i->Min.MtFCSR.src);
2687 return;
2688 case Min_MfFCSR:
2689 addHRegUse(u, HRmWrite, i->Min.MfFCSR.dst);
2690 return;
2691 case Min_Macc:
2692 addHRegUse(u, HRmModify, hregMIPS_HI(mode64));
2693 addHRegUse(u, HRmModify, hregMIPS_LO(mode64));
2694 addHRegUse(u, HRmRead, i->Min.Macc.srcL);
2695 addHRegUse(u, HRmRead, i->Min.Macc.srcR);
2696 return;
2697 case Min_Div:
2698 addHRegUse(u, HRmWrite, hregMIPS_HI(mode64));
2699 addHRegUse(u, HRmWrite, hregMIPS_LO(mode64));
2700 addHRegUse(u, HRmRead, i->Min.Div.srcL);
2701 addHRegUse(u, HRmRead, i->Min.Div.srcR);
2702 return;
2703 case Min_Divr6:
2704 addHRegUse(u, HRmWrite, i->Min.Divr6.dst);
2705 addHRegUse(u, HRmRead, i->Min.Divr6.srcL);
2706 addHRegUse(u, HRmRead, i->Min.Divr6.srcR);
2707 return;
2708 case Min_Call: {
2709 /* Logic and comments copied/modified from x86, ppc and arm back end.
2710 First off, claim it trashes all the caller-saved regs
2711 which fall within the register allocator's jurisdiction. */
2712 if (i->Min.Call.cond != MIPScc_AL)
2713 addHRegUse(u, HRmRead, i->Min.Call.src);
2714 UInt argir;
2715 addHRegUse(u, HRmWrite, hregMIPS_GPR1(mode64));
2717 addHRegUse(u, HRmWrite, hregMIPS_GPR2(mode64));
2718 addHRegUse(u, HRmWrite, hregMIPS_GPR3(mode64));
2720 addHRegUse(u, HRmWrite, hregMIPS_GPR4(mode64));
2721 addHRegUse(u, HRmWrite, hregMIPS_GPR5(mode64));
2722 addHRegUse(u, HRmWrite, hregMIPS_GPR6(mode64));
2723 addHRegUse(u, HRmWrite, hregMIPS_GPR7(mode64));
2725 addHRegUse(u, HRmWrite, hregMIPS_GPR8(mode64));
2726 addHRegUse(u, HRmWrite, hregMIPS_GPR9(mode64));
2727 addHRegUse(u, HRmWrite, hregMIPS_GPR10(mode64));
2728 addHRegUse(u, HRmWrite, hregMIPS_GPR11(mode64));
2729 addHRegUse(u, HRmWrite, hregMIPS_GPR12(mode64));
2730 addHRegUse(u, HRmWrite, hregMIPS_GPR13(mode64));
2731 addHRegUse(u, HRmWrite, hregMIPS_GPR14(mode64));
2732 addHRegUse(u, HRmWrite, hregMIPS_GPR15(mode64));
2734 addHRegUse(u, HRmWrite, hregMIPS_GPR24(mode64));
2735 addHRegUse(u, HRmWrite, hregMIPS_GPR25(mode64));
2736 addHRegUse(u, HRmWrite, hregMIPS_GPR31(mode64));
2738 /* Now we have to state any parameter-carrying registers
2739 which might be read. This depends on the argiregs field. */
2740 argir = i->Min.Call.argiregs;
2741 if (argir & (1<<11)) addHRegUse(u, HRmRead, hregMIPS_GPR11(mode64));
2742 if (argir & (1<<10)) addHRegUse(u, HRmRead, hregMIPS_GPR10(mode64));
2743 if (argir & (1<<9)) addHRegUse(u, HRmRead, hregMIPS_GPR9(mode64));
2744 if (argir & (1<<8)) addHRegUse(u, HRmRead, hregMIPS_GPR8(mode64));
2745 if (argir & (1<<7)) addHRegUse(u, HRmRead, hregMIPS_GPR7(mode64));
2746 if (argir & (1<<6)) addHRegUse(u, HRmRead, hregMIPS_GPR6(mode64));
2747 if (argir & (1<<5)) addHRegUse(u, HRmRead, hregMIPS_GPR5(mode64));
2748 if (argir & (1<<4)) addHRegUse(u, HRmRead, hregMIPS_GPR4(mode64));
2750 vassert(0 == (argir & ~((1 << 4) | (1 << 5) | (1 << 6)
2751 | (1 << 7) | (1 << 8) | (1 << 9) | (1 << 10)
2752 | (1 << 11))));
2754 return;
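/* The argiregs bits 4..11 correspond to GPR4..GPR11: $4..$7 ($a0..$a3) as
   used by o32, extended to $4..$11 under the n32/n64 calling conventions.
   The vassert above guarantees that no other bit is ever set. */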
2756 /* XDirect/XIndir/XAssisted are also a bit subtle. They
2757 conditionally exit the block. Hence we only need to list (1)
2758 the registers that they read, and (2) the registers that they
2759 write in the case where the block is not exited. (2) is
2760 empty, hence only (1) is relevant here. */
2761 case Min_XDirect:
2762 addRegUsage_MIPSAMode(u, i->Min.XDirect.amPC);
2763 return;
2764 case Min_XIndir:
2765 addHRegUse(u, HRmRead, i->Min.XIndir.dstGA);
2766 addRegUsage_MIPSAMode(u, i->Min.XIndir.amPC);
2767 return;
2768 case Min_XAssisted:
2769 addHRegUse(u, HRmRead, i->Min.XAssisted.dstGA);
2770 addRegUsage_MIPSAMode(u, i->Min.XAssisted.amPC);
2771 return;
2772 case Min_Load:
2773 addRegUsage_MIPSAMode(u, i->Min.Load.src);
2774 addHRegUse(u, HRmWrite, i->Min.Load.dst);
2775 return;
2776 case Min_Store:
2777 addHRegUse(u, HRmRead, i->Min.Store.src);
2778 addRegUsage_MIPSAMode(u, i->Min.Store.dst);
2779 return;
2780 case Min_LoadL:
2781 addRegUsage_MIPSAMode(u, i->Min.LoadL.src);
2782 addHRegUse(u, HRmWrite, i->Min.LoadL.dst);
2783 return;
2784 case Min_Cas:
2785 addHRegUse(u, HRmWrite, i->Min.Cas.old);
2786 addHRegUse(u, HRmRead, i->Min.Cas.addr);
2787 addHRegUse(u, HRmRead, i->Min.Cas.expd);
2788 addHRegUse(u, HRmModify, i->Min.Cas.data);
2789 return;
2790 case Min_StoreC:
2791 addHRegUse(u, HRmWrite, i->Min.StoreC.src);
2792 addHRegUse(u, HRmRead, i->Min.StoreC.src);
2793 addRegUsage_MIPSAMode(u, i->Min.StoreC.dst);
2794 return;
2795 case Min_RdWrLR:
2796 addHRegUse(u, (i->Min.RdWrLR.wrLR ? HRmRead : HRmWrite),
2797 i->Min.RdWrLR.gpr);
2798 return;
2799 case Min_FpLdSt:
2800 if (i->Min.FpLdSt.sz == 4) {
2801 addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
2802 i->Min.FpLdSt.reg);
2803 addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
2804 return;
2805 } else if (i->Min.FpLdSt.sz == 8) {
2806 addHRegUse(u, (i->Min.FpLdSt.isLoad ? HRmWrite : HRmRead),
2807 i->Min.FpLdSt.reg);
2808 addRegUsage_MIPSAMode(u, i->Min.FpLdSt.addr);
2809 return;
2811 break;
2812 case Min_FpUnary:
2813 addHRegUse(u, HRmWrite, i->Min.FpUnary.dst);
2814 addHRegUse(u, HRmRead, i->Min.FpUnary.src);
2815 return;
2816 case Min_FpBinary:
2817 addHRegUse(u, HRmWrite, i->Min.FpBinary.dst);
2818 addHRegUse(u, HRmRead, i->Min.FpBinary.srcL);
2819 addHRegUse(u, HRmRead, i->Min.FpBinary.srcR);
2820 return;
2821 case Min_FpTernary:
2822 addHRegUse(u, HRmWrite, i->Min.FpTernary.dst);
2823 addHRegUse(u, HRmRead, i->Min.FpTernary.src1);
2824 addHRegUse(u, HRmRead, i->Min.FpTernary.src2);
2825 addHRegUse(u, HRmRead, i->Min.FpTernary.src3);
2826 return;
2827 case Min_FpConvert:
2828 addHRegUse(u, HRmWrite, i->Min.FpConvert.dst);
2829 addHRegUse(u, HRmRead, i->Min.FpConvert.src);
2830 return;
2831 case Min_FpCompare:
2832 addHRegUse(u, HRmWrite, i->Min.FpCompare.dst);
2833 addHRegUse(u, HRmRead, i->Min.FpCompare.srcL);
2834 addHRegUse(u, HRmRead, i->Min.FpCompare.srcR);
2835 return;
2836 case Min_FpMinMax:
2837 addHRegUse(u, HRmWrite, i->Min.FpMinMax.dst);
2838 addHRegUse(u, HRmRead, i->Min.FpMinMax.srcL);
2839 addHRegUse(u, HRmRead, i->Min.FpMinMax.srcR);
2840 return;
2841 case Min_FpGpMove:
2842 addHRegUse(u, HRmWrite, i->Min.FpGpMove.dst);
2843 addHRegUse(u, HRmRead, i->Min.FpGpMove.src);
2844 return;
2845 case Min_MoveCond:
2846 addHRegUse(u, HRmWrite, i->Min.MoveCond.dst);
2847 addHRegUse(u, HRmRead, i->Min.MoveCond.src);
2848 addHRegUse(u, HRmRead, i->Min.MoveCond.cond);
2849 return;
2850 case Min_EvCheck:
2851 /* We expect both amodes only to mention the guest state pointer, so this
2852 is in fact pointless, since that register isn't allocatable, but anyway.. */
2853 addRegUsage_MIPSAMode(u, i->Min.EvCheck.amCounter);
2854 addRegUsage_MIPSAMode(u, i->Min.EvCheck.amFailAddr);
2855 return;
2856 case Min_ProfInc:
2857 /* does not use any registers. */
2858 return;
2859 default:
2860 ppMIPSInstr(i, mode64);
2861 vpanic("getRegUsage_MIPSInstr");
2862 break;
2866 /* local helper */
2867 static void mapReg(HRegRemap * m, HReg * r)
2869 *r = lookupHRegRemap(m, *r);
2872 void mapRegs_MIPSInstr(HRegRemap * m, MIPSInstr * i, Bool mode64)
2874 switch (i->tag) {
2875 case Min_LI:
2876 mapReg(m, &i->Min.LI.dst);
2877 break;
2878 case Min_Alu:
2879 mapReg(m, &i->Min.Alu.srcL);
2880 mapRegs_MIPSRH(m, i->Min.Alu.srcR);
2881 mapReg(m, &i->Min.Alu.dst);
2882 return;
2883 case Min_Shft:
2884 mapReg(m, &i->Min.Shft.srcL);
2885 mapRegs_MIPSRH(m, i->Min.Shft.srcR);
2886 mapReg(m, &i->Min.Shft.dst);
2887 return;
2888 case Min_Rotx:
2889 mapReg(m, &i->Min.Rotx.rt);
2890 mapReg(m, &i->Min.Rotx.rd);
2891 return;
2892 case Min_Cmp:
2893 mapReg(m, &i->Min.Cmp.srcL);
2894 mapReg(m, &i->Min.Cmp.srcR);
2895 mapReg(m, &i->Min.Cmp.dst);
2896 return;
2897 case Min_Unary:
2898 mapReg(m, &i->Min.Unary.src);
2899 mapReg(m, &i->Min.Unary.dst);
2900 return;
2901 case Min_Mul:
2902 mapReg(m, &i->Min.Mul.dst);
2903 mapReg(m, &i->Min.Mul.srcL);
2904 mapReg(m, &i->Min.Mul.srcR);
2905 return;
2906 case Min_Mult:
2907 mapReg(m, &i->Min.Mult.srcL);
2908 mapReg(m, &i->Min.Mult.srcR);
2909 return;
2910 case Min_Ext:
2911 mapReg(m, &i->Min.Ext.src);
2912 mapReg(m, &i->Min.Ext.dst);
2913 return;
2914 case Min_Mulr6:
2915 mapReg(m, &i->Min.Mulr6.dst);
2916 mapReg(m, &i->Min.Mulr6.srcL);
2917 mapReg(m, &i->Min.Mulr6.srcR);
2918 return;
2919 case Min_Mthi:
2920 case Min_Mtlo:
2921 mapReg(m, &i->Min.MtHL.src);
2922 return;
2923 case Min_Mfhi:
2924 case Min_Mflo:
2925 mapReg(m, &i->Min.MfHL.dst);
2926 return;
2927 case Min_Macc:
2928 mapReg(m, &i->Min.Macc.srcL);
2929 mapReg(m, &i->Min.Macc.srcR);
2930 return;
2931 case Min_Div:
2932 mapReg(m, &i->Min.Div.srcL);
2933 mapReg(m, &i->Min.Div.srcR);
2934 return;
2936 case Min_Divr6:
2937 mapReg(m, &i->Min.Divr6.dst);
2938 mapReg(m, &i->Min.Divr6.srcL);
2939 mapReg(m, &i->Min.Divr6.srcR);
2940 return;
2942 case Min_Call:
2943 if (i->Min.Call.cond != MIPScc_AL)
2944 mapReg(m, &i->Min.Call.src);
2945 return;
2947 case Msa_MI10:
2948 mapReg(m, &i->Min.MsaMi10.rs);
2949 mapReg(m, &i->Min.MsaMi10.wd);
2950 return;
2952 case Msa_ELM:
2953 mapReg(m, &i->Min.MsaElm.ws);
2954 mapReg(m, &i->Min.MsaElm.wd);
2955 return;
2957 case Msa_2R:
2958 mapReg(m, &i->Min.Msa2R.wd);
2959 mapReg(m, &i->Min.Msa2R.ws);
2960 return;
2962 case Msa_3R:
2963 mapReg(m, &i->Min.Msa3R.wt);
2964 mapReg(m, &i->Min.Msa3R.ws);
2965 mapReg(m, &i->Min.Msa3R.wd);
2966 return;
2968 case Msa_VEC:
2969 mapReg(m, &i->Min.MsaVec.wt);
2970 mapReg(m, &i->Min.MsaVec.ws);
2971 mapReg(m, &i->Min.MsaVec.wd);
2972 return;
2974 case Msa_BIT:
2975 mapReg(m, &i->Min.MsaBit.ws);
2976 mapReg(m, &i->Min.MsaBit.wd);
2977 return;
2979 case Msa_3RF:
2980 mapReg(m, &i->Min.Msa3RF.wt);
2981 mapReg(m, &i->Min.Msa3RF.ws);
2982 mapReg(m, &i->Min.Msa3RF.wd);
2983 return;
2985 case Msa_2RF:
2986 mapReg(m, &i->Min.Msa2RF.ws);
2987 mapReg(m, &i->Min.Msa2RF.wd);
2988 return;
2990 case Min_XDirect:
2991 mapRegs_MIPSAMode(m, i->Min.XDirect.amPC);
2992 return;
2993 case Min_XIndir:
2994 mapReg(m, &i->Min.XIndir.dstGA);
2995 mapRegs_MIPSAMode(m, i->Min.XIndir.amPC);
2996 return;
2997 case Min_XAssisted:
2998 mapReg(m, &i->Min.XAssisted.dstGA);
2999 mapRegs_MIPSAMode(m, i->Min.XAssisted.amPC);
3000 return;
3001 case Min_Load:
3002 mapRegs_MIPSAMode(m, i->Min.Load.src);
3003 mapReg(m, &i->Min.Load.dst);
3004 return;
3005 case Min_Store:
3006 mapReg(m, &i->Min.Store.src);
3007 mapRegs_MIPSAMode(m, i->Min.Store.dst);
3008 return;
3009 case Min_LoadL:
3010 mapRegs_MIPSAMode(m, i->Min.LoadL.src);
3011 mapReg(m, &i->Min.LoadL.dst);
3012 return;
3013 case Min_Cas:
3014 mapReg(m, &i->Min.Cas.old);
3015 mapReg(m, &i->Min.Cas.addr);
3016 mapReg(m, &i->Min.Cas.expd);
3017 mapReg(m, &i->Min.Cas.data);
3018 return;
3019 case Min_StoreC:
3020 mapReg(m, &i->Min.StoreC.src);
3021 mapRegs_MIPSAMode(m, i->Min.StoreC.dst);
3022 return;
3023 case Min_RdWrLR:
3024 mapReg(m, &i->Min.RdWrLR.gpr);
3025 return;
3026 case Min_FpLdSt:
3027 if (i->Min.FpLdSt.sz == 4) {
3028 mapReg(m, &i->Min.FpLdSt.reg);
3029 mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
3030 return;
3031 } else if (i->Min.FpLdSt.sz == 8) {
3032 mapReg(m, &i->Min.FpLdSt.reg);
3033 mapRegs_MIPSAMode(m, i->Min.FpLdSt.addr);
3034 return;
3036 break;
3037 case Min_FpUnary:
3038 mapReg(m, &i->Min.FpUnary.dst);
3039 mapReg(m, &i->Min.FpUnary.src);
3040 return;
3041 case Min_FpBinary:
3042 mapReg(m, &i->Min.FpBinary.dst);
3043 mapReg(m, &i->Min.FpBinary.srcL);
3044 mapReg(m, &i->Min.FpBinary.srcR);
3045 return;
3046 case Min_FpTernary:
3047 mapReg(m, &i->Min.FpTernary.dst);
3048 mapReg(m, &i->Min.FpTernary.src1);
3049 mapReg(m, &i->Min.FpTernary.src2);
3050 mapReg(m, &i->Min.FpTernary.src3);
3051 return;
3052 case Min_FpConvert:
3053 mapReg(m, &i->Min.FpConvert.dst);
3054 mapReg(m, &i->Min.FpConvert.src);
3055 return;
3056 case Min_FpCompare:
3057 mapReg(m, &i->Min.FpCompare.dst);
3058 mapReg(m, &i->Min.FpCompare.srcL);
3059 mapReg(m, &i->Min.FpCompare.srcR);
3060 return;
3061 case Min_FpMinMax:
3062 mapReg(m, &i->Min.FpMinMax.dst);
3063 mapReg(m, &i->Min.FpMinMax.srcL);
3064 mapReg(m, &i->Min.FpMinMax.srcR);
3065 return;
3066 case Min_MtFCSR:
3067 mapReg(m, &i->Min.MtFCSR.src);
3068 return;
3069 case Min_MfFCSR:
3070 mapReg(m, &i->Min.MfFCSR.dst);
3071 return;
3072 case Min_FpGpMove:
3073 mapReg(m, &i->Min.FpGpMove.dst);
3074 mapReg(m, &i->Min.FpGpMove.src);
3075 return;
3076 case Min_MoveCond:
3077 mapReg(m, &i->Min.MoveCond.dst);
3078 mapReg(m, &i->Min.MoveCond.src);
3079 mapReg(m, &i->Min.MoveCond.cond);
3080 return;
3081 case Min_EvCheck:
3082 /* We expect both amodes only to mention the guest state pointer, so this
3083 is in fact pointless, since that register isn't allocatable, but anyway.. */
3084 mapRegs_MIPSAMode(m, i->Min.EvCheck.amCounter);
3085 mapRegs_MIPSAMode(m, i->Min.EvCheck.amFailAddr);
3086 return;
3087 case Min_ProfInc:
3088 /* does not use any registers. */
3089 return;
3090 default:
3091 ppMIPSInstr(i, mode64);
3092 vpanic("mapRegs_MIPSInstr");
3093 break;
3098 /* Generate mips spill/reload instructions under the direction of the
3099 register allocator. */
3100 void genSpill_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
3101 Int offsetB, Bool mode64)
3103 MIPSAMode *am;
3104 vassert(offsetB >= 0);
3105 vassert(!hregIsVirtual(rreg));
3106 *i1 = *i2 = NULL;
3107 am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
3109 switch (hregClass(rreg)) {
3110 case HRcInt64:
3111 vassert(mode64);
3112 *i1 = MIPSInstr_Store(8, am, rreg, mode64);
3113 break;
3114 case HRcInt32:
3115 vassert(!mode64);
3116 *i1 = MIPSInstr_Store(4, am, rreg, mode64);
3117 break;
3118 case HRcFlt32:
3119 vassert(!mode64);
3120 *i1 = MIPSInstr_FpLdSt(False /*Store */ , 4, rreg, am);
3121 break;
3122 case HRcFlt64:
3123 *i1 = MIPSInstr_FpLdSt(False /*Store */ , 8, rreg, am);
3124 break;
3125 case HRcVec128:
3126 *i1 = MIPSInstr_MsaMi10(MSA_ST, (offsetB>>3),
3127 GuestStatePointer(mode64), rreg, MSA_D);
3128 break;
3129 default:
3130 ppHRegClass(hregClass(rreg));
3131 vpanic("genSpill_MIPS: unimplemented regclass");
3132 break;
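/* For the HRcVec128 case above: the MSA st.d/ld.d offset field counts
   doubleword elements, not bytes, hence "offsetB >> 3".  This implicitly
   assumes offsetB is a multiple of 8 (otherwise the low bits would simply
   be dropped), with the offset taken relative to the guest state pointer. */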
3136 void genReload_MIPS( /*OUT*/ HInstr ** i1, /*OUT*/ HInstr ** i2, HReg rreg,
3137 Int offsetB, Bool mode64)
3139 MIPSAMode *am;
3140 vassert(!hregIsVirtual(rreg));
3141 am = MIPSAMode_IR(offsetB, GuestStatePointer(mode64));
3143 switch (hregClass(rreg)) {
3144 case HRcInt64:
3145 vassert(mode64);
3146 *i1 = MIPSInstr_Load(8, rreg, am, mode64);
3147 break;
3148 case HRcInt32:
3149 vassert(!mode64);
3150 *i1 = MIPSInstr_Load(4, rreg, am, mode64);
3151 break;
3152 case HRcFlt32:
3153 if (mode64)
3154 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
3155 else
3156 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 4, rreg, am);
3157 break;
3158 case HRcFlt64:
3159 *i1 = MIPSInstr_FpLdSt(True /*Load */ , 8, rreg, am);
3160 break;
3161 case HRcVec128:
3162 *i1 = MIPSInstr_MsaMi10(MSA_LD, (offsetB>>3),
3163 GuestStatePointer(mode64), rreg, MSA_D);
3164 break;
3165 default:
3166 ppHRegClass(hregClass(rreg));
3167 vpanic("genReload_MIPS: unimplemented regclass");
3168 break;
3172 MIPSInstr* genMove_MIPS(HReg from, HReg to, Bool mode64)
3174 switch (hregClass(from)) {
3175 case HRcInt32:
3176 case HRcInt64:
3177 return MIPSInstr_Alu(Malu_OR, to, from, MIPSRH_Reg(from));
3178 default:
3179 ppHRegClass(hregClass(from));
3180 vpanic("genMove_MIPS: unimplemented regclass");
3184 /* --------- The mips assembler --------- */
3186 inline static UInt iregNo(HReg r, Bool mode64)
3188 UInt n;
3189 vassert(hregClass(r) == (mode64 ? HRcInt64 : HRcInt32));
3190 vassert(!hregIsVirtual(r));
3191 n = hregEncoding(r);
3192 vassert(n <= 32);
3193 return n;
3196 inline static UInt fregNo(HReg r, Bool mode64)
3198 UInt n;
3199 vassert(!hregIsVirtual(r));
3200 n = hregEncoding(r);
3201 vassert(n <= 31);
3202 return n;
3205 inline static UInt dregNo(HReg r)
3207 UInt n;
3208 vassert(!hregIsVirtual(r));
3209 n = hregEncoding(r);
3210 vassert(n <= 31);
3211 return n;
3214 inline static UInt qregEnc ( HReg r )
3216 UInt n;
3217 vassert(!hregIsVirtual(r));
3218 n = hregEncoding(r);
3219 vassert(n <= 31);
3220 return n;
3223 /* Emit 32bit instruction */
3224 static UChar *emit32(UChar * p, UInt w32)
3226 #if defined (_MIPSEL)
3227 *p++ = toUChar(w32 & 0x000000FF);
3228 *p++ = toUChar((w32 >> 8) & 0x000000FF);
3229 *p++ = toUChar((w32 >> 16) & 0x000000FF);
3230 *p++ = toUChar((w32 >> 24) & 0x000000FF);
3231 /* HACK !!!!
3232 MIPS endianness is decided at compile time using gcc defined
3233 symbols _MIPSEL or _MIPSEB. When compiling libvex in a cross-arch
3234 setup, then none of these is defined. We just choose here by default
3235 mips Big Endian to allow libvexmultiarch_test to work when using
3236 a mips host architecture.
3237 A cleaner way would be to either have mips using 'dynamic endness'
3238 (like ppc64be or le, decided at runtime) or at least defining
3239 by default _MIPSEB when compiling on a non mips system.
3240 #elif defined (_MIPSEB).
3241 */
3242 #else
3243 *p++ = toUChar((w32 >> 24) & 0x000000FF);
3244 *p++ = toUChar((w32 >> 16) & 0x000000FF);
3245 *p++ = toUChar((w32 >> 8) & 0x000000FF);
3246 *p++ = toUChar(w32 & 0x000000FF);
3247 #endif
3248 return p;
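/* Illustration: emit32(p, 0x27BDFFF0), i.e. "addiu $29, $29, -16", stores
   the bytes F0 FF BD 27 on a little-endian (_MIPSEL) host and
   27 BD FF F0 on the big-endian/default path. */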
3250 /* Fetch an instruction */
3251 static UInt fetch32 ( UChar* p )
3253 UInt w32 = 0;
3254 #if defined (_MIPSEL)
3255 w32 |= ((0xFF & (UInt)p[0]) << 0);
3256 w32 |= ((0xFF & (UInt)p[1]) << 8);
3257 w32 |= ((0xFF & (UInt)p[2]) << 16);
3258 w32 |= ((0xFF & (UInt)p[3]) << 24);
3259 #elif defined (_MIPSEB)
3260 w32 |= ((0xFF & (UInt)p[0]) << 24);
3261 w32 |= ((0xFF & (UInt)p[1]) << 16);
3262 w32 |= ((0xFF & (UInt)p[2]) << 8);
3263 w32 |= ((0xFF & (UInt)p[3]) << 0);
3264 #endif
3265 return w32;
3268 /* physical structure of mips instructions */
3269 /* type I : opcode - 6 bits
3270 rs - 5 bits
3271 rt - 5 bits
3272 immediate - 16 bits
3273 */
3274 static UChar *mkFormI(UChar * p, UInt opc, UInt rs, UInt rt, UInt imm)
3276 UInt theInstr;
3277 vassert(opc < 0x40);
3278 vassert(rs < 0x20);
3279 vassert(rt < 0x20);
3280 imm = imm & 0xFFFF;
3281 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (imm));
3282 return emit32(p, theInstr);
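/* Encoding illustration (not emitted anywhere as such):
   mkFormI(p, 35, 23, 9, 8) encodes "lw $9, 8($23)" as
   0x8EE90008 = 100011 10111 01001 0000000000001000
                opc=35 rs=23 rt=9  imm=8 */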
3285 /* type R: opcode - 6 bits
3286 rs - 5 bits
3287 rt - 5 bits
3288 rd - 5 bits
3289 sa - 5 bits
3290 func - 6 bits
3291 */
3292 static UChar *mkFormR(UChar * p, UInt opc, UInt rs, UInt rt, UInt rd, UInt sa,
3293 UInt func)
3295 if (rs >= 0x20)
3296 vex_printf("rs = %u\n", rs);
3297 UInt theInstr;
3298 vassert(opc < 0x40);
3299 vassert(rs < 0x20);
3300 vassert(rt < 0x20);
3301 vassert(rd < 0x20);
3302 vassert(sa < 0x20);
3303 func = func & 0xFFFF;
3304 theInstr = ((opc << 26) | (rs << 21) | (rt << 16) | (rd << 11) | (sa << 6) |
3305 (func));
3307 return emit32(p, theInstr);
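/* Encoding illustration: mkFormR(p, 0, 3, 4, 2, 0, 37) encodes
   "or $2, $3, $4" as
   0x00641025 = 000000 00011 00100 00010 00000 100101
                opc=0  rs=3  rt=4  rd=2  sa=0  func=37 */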
3310 static UChar *mkFormS(UChar * p, UInt opc1, UInt rRD, UInt rRS, UInt rRT,
3311 UInt sa, UInt opc2)
3313 UInt theInstr;
3314 vassert(opc1 <= 0x3F);
3315 vassert(rRD < 0x20);
3316 vassert(rRS < 0x20);
3317 vassert(rRT < 0x20);
3318 vassert(opc2 <= 0x3F);
3319 vassert(sa <= 0x3F);
3321 theInstr = ((opc1 << 26) | (rRS << 21) | (rRT << 16) | (rRD << 11) |
3322 ((sa & 0x1F) << 6) | (opc2));
3324 return emit32(p, theInstr);
3327 static UChar *mkFormMI10(UChar * p, UInt msa, UInt s10, UInt rRS, UInt rWD,
3328 UInt opc, UInt rDF) {
3329 UInt theInstr;
3330 vassert(rDF < 0x04);
3331 vassert(opc < 0x10);
3332 vassert(rWD < 0x20);
3333 vassert(rRS < 0x20);
3334 vassert(s10 < 0x400);
3335 vassert(msa < 0x40);
3336 theInstr = ((msa << 26) | (s10 << 16) | (rRS << 11) | (rWD << 6) |
3337 ((opc << 2) | rDF));
3338 return emit32(p, theInstr);
3341 static UChar *mkFormELM(UChar *p, UInt msa, UInt op, UInt df, UInt ws, UInt wd,
3342 UInt opc) {
3343 UInt theInstr;
3344 vassert(msa < 0x40);
3345 vassert(ws < 0x20);
3346 vassert(wd < 0x20);
3347 vassert(opc < 0x40);
3348 theInstr = ((msa << 26) | (op << 22) | (df << 16) | (ws << 11) |
3349 ((wd << 6) | opc));
3350 return emit32(p, theInstr);
3353 static UChar *mkForm2R(UChar *p, UInt msa, UInt op, UInt df, UInt ws, UInt wd,
3354 UInt opc) {
3355 UInt theInstr;
3356 theInstr = ((msa << 26) | (op << 18) | (df << 16) | (ws << 11) |
3357 (wd << 6) | opc);
3358 return emit32(p, theInstr);
3361 static UChar *mkForm3R(UChar *p, UInt op, UInt df, UInt wd, UInt ws, UInt wt) {
3362 UInt theInstr;
3363 vassert(op < 0x3800040);
3364 vassert(df < 0x40);
3365 vassert(wt < 0x20);
3366 vassert(ws < 0x20);
3367 vassert(wd < 0x20);
3368 theInstr = OPC_MSA | op | (df << 21) | (wt << 16) | (ws << 11) |
3369 (wd << 6);
3370 return emit32(p, theInstr);
3373 static UChar *mkFormVEC(UChar *p, UInt op, UInt wt, UInt ws, UInt wd) {
3374 UInt theInstr;
3375 vassert(op < 0x20);
3376 vassert(wt < 0x20);
3377 vassert(ws < 0x20);
3378 vassert(wd < 0x20);
3379 theInstr = OPC_MSA | (op << 21) | (wt << 16) | (ws << 11) |
3380 (wd << 6) | 0x1E;
3381 return emit32(p, theInstr);
3384 static UChar *mkFormBIT(UChar *p, UInt op, UInt df, UInt ms, UInt ws, UInt wd) {
3385 UInt theInstr;
3386 UInt dfm = 0;
3387 vassert(op < 0x3800040);
3388 vassert(df < 0x40);
3389 vassert(ms < 0x100);
3390 vassert(ws < 0x20);
3391 vassert(wd < 0x20);
3393 switch (df) {
3394 case 0:
3395 dfm |= 0x10;
3396 /* fallthrough */
3397 case 1:
3398 dfm |= 0x20;
3399 /* fallthrough */
3400 case 2:
3401 dfm |= 0x40;
3404 dfm |= ms;
3405 theInstr = OPC_MSA | op | (dfm << 16) | (ws << 11) |
3406 (wd << 6);
3407 return emit32(p, theInstr);
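/* The switch above packs the MSA df/m field: the data format is encoded as
   a leading-ones prefix and the bit index m fills the remaining low bits,
   giving 1110mmm for .b, 110mmmm for .h, 10mmmmm for .w and 0mmmmmm for .d
   (hence the deliberate fall-throughs accumulating 0x10, 0x20 and 0x40). */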
3410 static UChar *mkForm3RF(UChar *p, UInt op, UInt df, UInt wd, UInt ws, UInt wt) {
3411 UInt theInstr;
3412 vassert(op < 0x3C0001D);
3413 vassert(df < 0x40);
3414 vassert(wt < 0x20);
3415 vassert(ws < 0x20);
3416 vassert(wd < 0x20);
3417 theInstr = OPC_MSA | op | (df << 21) | (wt << 16) | (ws << 11) |
3418 (wd << 6);
3419 return emit32(p, theInstr);
3422 static UChar *mkForm2RF(UChar *p, UInt op, UInt df, UInt ws, UInt wd,
3423 UInt opc) {
3424 UInt theInstr;
3425 theInstr = OPC_MSA | (op << 17) | (df << 16) | (ws << 11) | (wd << 6) | opc;
3426 return emit32(p, theInstr);
3429 static UChar *doAMode_IR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
3430 Bool mode64)
3432 UInt rA, idx, r_dst;
3433 vassert(am->tag == Mam_IR);
3434 vassert(am->Mam.IR.index < 0x10000);
3436 rA = iregNo(am->Mam.IR.base, mode64);
3437 idx = am->Mam.IR.index;
3439 if (rSD == 33 || rSD == 34)
3440 r_dst = 24;
3441 else
3442 r_dst = rSD;
3444 if (opc1 < 40) {
3445 /* load */
3446 if (rSD == 33)
3447 /* mfhi */
3448 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
3449 else if (rSD == 34)
3450 /* mflo */
3451 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
3454 p = mkFormI(p, opc1, rA, r_dst, idx);
3456 if (opc1 >= 40) {
3457 /* store */
3458 if (rSD == 33)
3459 /* mthi */
3460 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
3461 else if (rSD == 34)
3462 /* mtlo */
3463 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
3466 return p;
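/* In doAMode_IR (and doAMode_RR below), register numbers 33 and 34 are this
   backend's encodings for the HI and LO accumulator halves (compare the
   MFHI/MFLO handling in the Malu_OR case of emit_MIPSInstr below).  They
   cannot appear directly in a load or store, so $24 (t8) is used as a
   staging register in their place. */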
3469 static UChar *doAMode_RR(UChar * p, UInt opc1, UInt rSD, MIPSAMode * am,
3470 Bool mode64)
3472 UInt rA, rB, r_dst;
3473 vassert(am->tag == Mam_RR);
3475 rA = iregNo(am->Mam.RR.base, mode64);
3476 rB = iregNo(am->Mam.RR.index, mode64);
3478 if (rSD == 33 || rSD == 34)
3479 r_dst = 24;
3480 else
3481 r_dst = rSD;
3483 if (opc1 < 40) {
3484 /* load */
3485 if (rSD == 33)
3486 /* mfhi */
3487 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
3488 else if (rSD == 34)
3489 /* mflo */
3490 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
3493 if (mode64) {
3494 /* daddu rA, rA, rB
3495 sd/ld r_dst, 0(rA)
3496 dsubu rA, rA, rB */
3497 p = mkFormR(p, 0, rA, rB, rA, 0, 45);
3498 p = mkFormI(p, opc1, rA, r_dst, 0);
3499 p = mkFormR(p, 0, rA, rB, rA, 0, 47);
3500 } else {
3501 /* addu rA, rA, rB
3502 sw/lw r_dst, 0(rA)
3503 subu rA, rA, rB */
3504 p = mkFormR(p, 0, rA, rB, rA, 0, 33);
3505 p = mkFormI(p, opc1, rA, r_dst, 0);
3506 p = mkFormR(p, 0, rA, rB, rA, 0, 35);
3508 if (opc1 >= 40) {
3509 /* store */
3510 if (rSD == 33)
3511 /* mthi */
3512 p = mkFormR(p, 0, r_dst, 0, 0, 0, 17);
3513 else if (rSD == 34)
3514 /* mtlo */
3515 p = mkFormR(p, 0, r_dst, 0, 0, 0, 19);
3518 return p;
3521 /* Load imm to r_dst */
3522 static UChar *mkLoadImm(UChar * p, UInt r_dst, ULong imm, Bool mode64)
3524 if (!mode64) {
3525 vassert(r_dst < 0x20);
3526 UInt u32 = (UInt) imm;
3527 Int s32 = (Int) u32;
3528 Long s64 = (Long) s32;
3529 imm = (ULong) s64;
3532 if (imm >= 0xFFFFFFFFFFFF8000ULL || imm < 0x8000) {
3533 /* sign-extendable from 16 bits
3534 addiu r_dst, 0, imm => li r_dst, imm */
3535 p = mkFormI(p, 9, 0, r_dst, imm & 0xFFFF);
3536 } else {
3537 if (imm >= 0xFFFFFFFF80000000ULL || imm < 0x80000000ULL) {
3538 /* sign-extendable from 32 bits
3540 lui r_dst, (imm >> 16) */
3541 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
3542 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3543 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3544 } else {
3545 vassert(mode64);
3546 /* lui load in upper half of low word */
3547 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
3548 /* ori */
3549 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
3550 /* shift */
3551 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3552 /* ori */
3553 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
3554 /* shift */
3555 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3556 /* ori */
3557 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3560 return p;
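/* Examples of the three paths above (illustration only):
     imm = 0x00001234          ->  addiu r_dst, $0, 0x1234
     imm = 0x12345678          ->  lui   r_dst, 0x1234
                                    ori   r_dst, r_dst, 0x5678
     imm = 0x0123456789ABCDEF  ->  lui / ori / dsll 16 / ori / dsll 16 / ori,
                                    assembling 16 bits at a time
   (the last form is mode64 only). */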
3563 /* A simplified version of mkLoadImm that always generates 2 or 6
3564 instructions (32 or 64 bits respectively) even if it could generate
3565 fewer. This is needed for generating fixed sized patchable
3566 sequences. */
3567 static UChar* mkLoadImm_EXACTLY2or6 ( UChar* p,
3568 UInt r_dst, ULong imm, Bool mode64)
3570 vassert(r_dst < 0x20);
3572 if (!mode64) {
3573 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
3574 extension of the bottom 32 bits. (Probably unnecessary.) */
3575 UInt u32 = (UInt)imm;
3576 Int s32 = (Int)u32;
3577 Long s64 = (Long)s32;
3578 imm = (ULong)s64;
3581 if (!mode64) {
3582 /* sign-extendable from 32 bits
3584 lui r_dst, (imm >> 16) */
3585 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
3586 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3587 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3588 } else {
3589 /* full 64bit immediate load: 6 (six!) insns. */
3590 vassert(mode64);
3591 /* lui load in upper half of low word */
3592 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
3593 /* ori */
3594 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
3595 /* shift */
3596 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3597 /* ori */
3598 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
3599 /* shift */
3600 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3601 /* ori */
3602 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3604 return p;
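/* The fixed 2-insn (MIPS32) / 6-insn (MIPS64) shape matters because the
   Min_XDirect case below plants this sequence at a patchable site, and
   chainXDirect_MIPS/unchainXDirect_MIPS later rewrite the immediate in
   place; the sequence length therefore must not depend on the value
   being loaded. */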
3607 /* Checks whether the sequence of bytes at p was indeed created
3608 by mkLoadImm_EXACTLY2or6 with the given parameters. */
3609 static Bool isLoadImm_EXACTLY2or6 ( UChar* p_to_check,
3610 UInt r_dst, ULong imm, Bool mode64 )
3612 vassert(r_dst < 0x20);
3613 Bool ret;
3614 if (!mode64) {
3615 /* In 32-bit mode, make sure the top 32 bits of imm are a sign
3616 extension of the bottom 32 bits. (Probably unnecessary.) */
3617 UInt u32 = (UInt)imm;
3618 Int s32 = (Int)u32;
3619 Long s64 = (Long)s32;
3620 imm = (ULong)s64;
3623 if (!mode64) {
3624 UInt expect[2] = { 0, 0 };
3625 UChar* p = (UChar*)&expect[0];
3626 /* lui r_dst, (imm >> 16) */
3627 p = mkFormI(p, 15, 0, r_dst, (imm >> 16) & 0xFFFF);
3628 /* ori r_dst, r_dst, (imm & 0xFFFF) */
3629 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3630 vassert(p == (UChar*)&expect[2]);
3632 ret = fetch32(p_to_check + 0) == expect[0]
3633 && fetch32(p_to_check + 4) == expect[1];
3634 } else {
3635 UInt expect[6] = { 0, 0, 0, 0, 0, 0};
3636 UChar* p = (UChar*)&expect[0];
3637 /* lui load in upper half of low word */
3638 p = mkFormI(p, 15, 0, r_dst, (imm >> 48) & 0xFFFF);
3639 /* ori */
3640 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 32) & 0xFFFF);
3641 /* shift */
3642 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3643 /* ori */
3644 p = mkFormI(p, 13, r_dst, r_dst, (imm >> 16) & 0xFFFF);
3645 /* shift */
3646 p = mkFormS(p, 0, r_dst, 0, r_dst, 16, 56);
3647 /* ori */
3648 p = mkFormI(p, 13, r_dst, r_dst, imm & 0xFFFF);
3649 vassert(p == (UChar*)&expect[6]);
3651 ret = fetch32(p_to_check + 0) == expect[0]
3652 && fetch32(p_to_check + 4) == expect[1]
3653 && fetch32(p_to_check + 8) == expect[2]
3654 && fetch32(p_to_check + 12) == expect[3]
3655 && fetch32(p_to_check + 16) == expect[4]
3656 && fetch32(p_to_check + 20) == expect[5];
3658 return ret;
3661 /* Generate a machine-word sized load or store. Simplified version of
3662 the Min_Load and Min_Store cases below.
3663 This will generate 32-bit load/store on MIPS32, and 64-bit load/store on
3664 MIPS64 platforms.
3665 */
3666 static UChar* do_load_or_store_machine_word ( UChar* p, Bool isLoad, UInt reg,
3667 MIPSAMode* am, Bool mode64 )
3669 if (isLoad) { /* load */
3670 switch (am->tag) {
3671 case Mam_IR:
3672 if (mode64) {
3673 vassert(0 == (am->Mam.IR.index & 3));
3675 p = doAMode_IR(p, mode64 ? 55 : 35, reg, am, mode64);
3676 break;
3677 case Mam_RR:
3678 /* we could handle this case, but we don't expect to ever
3679 need to. */
3680 vassert(0);
3681 break;
3682 default:
3683 vassert(0);
3684 break;
3686 } else /* store */ {
3687 switch (am->tag) {
3688 case Mam_IR:
3689 if (mode64) {
3690 vassert(0 == (am->Mam.IR.index & 3));
3692 p = doAMode_IR(p, mode64 ? 63 : 43, reg, am, mode64);
3693 break;
3694 case Mam_RR:
3695 /* we could handle this case, but we don't expect to ever
3696 need to. */
3697 vassert(0);
3698 break;
3699 default:
3700 vassert(0);
3701 break;
3704 return p;
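/* Opcode 35 is lw and 55 is ld, 43 is sw and 63 is sd, so this emits a
   32-bit access on MIPS32 and a 64-bit access on MIPS64, as the comment
   above says. */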
3707 /* Generate a 32-bit sized load or store. Simplified version of
3708 do_load_or_store_machine_word above. */
3709 static UChar* do_load_or_store_word32 ( UChar* p, Bool isLoad, UInt reg,
3710 MIPSAMode* am, Bool mode64 )
3712 if (isLoad) { /* load */
3713 switch (am->tag) {
3714 case Mam_IR:
3715 if (mode64) {
3716 vassert(0 == (am->Mam.IR.index & 3));
3718 p = doAMode_IR(p, 35, reg, am, mode64);
3719 break;
3720 case Mam_RR:
3721 /* we could handle this case, but we don't expect to ever
3722 need to. */
3723 vassert(0);
3724 break;
3725 default:
3726 vassert(0);
3727 break;
3729 } else /* store */ {
3730 switch (am->tag) {
3731 case Mam_IR:
3732 if (mode64) {
3733 vassert(0 == (am->Mam.IR.index & 3));
3735 p = doAMode_IR(p, 43, reg, am, mode64);
3736 break;
3737 case Mam_RR:
3738 /* we could handle this case, but we don't expect to ever
3739 need to. */
3740 vassert(0);
3741 break;
3742 default:
3743 vassert(0);
3744 break;
3747 return p;
3750 /* Move r_src to r_dst */
3751 static UChar *mkMoveReg(UChar * p, UInt r_dst, UInt r_src)
3753 vassert(r_dst < 0x20);
3754 vassert(r_src < 0x20);
3756 if (r_dst != r_src) {
3757 /* or r_dst, r_src, r_src */
3758 p = mkFormR(p, 0, r_src, r_src, r_dst, 0, 37);
3760 return p;
3763 /* Emit an instruction into buf and return the number of bytes used.
3764 Note that buf is not the insn's final place, and therefore it is
3765 imperative to emit position-independent code. If the emitted
3766 instruction was a profiler inc, set *is_profInc to True, else
3767 leave it unchanged. */
3768 Int emit_MIPSInstr ( /*MB_MOD*/Bool* is_profInc,
3769 UChar* buf, Int nbuf, const MIPSInstr* i,
3770 Bool mode64,
3771 VexEndness endness_host,
3772 const void* disp_cp_chain_me_to_slowEP,
3773 const void* disp_cp_chain_me_to_fastEP,
3774 const void* disp_cp_xindir,
3775 const void* disp_cp_xassisted )
3777 UChar *p = &buf[0];
3778 UChar *ptmp = p;
3779 vassert(nbuf >= 32);
3782 switch (i->tag) {
3783 case Min_LI:
3784 p = mkLoadImm(p, iregNo(i->Min.LI.dst, mode64), i->Min.LI.imm, mode64);
3785 goto done;
3787 case Min_Alu: {
3788 MIPSRH *srcR = i->Min.Alu.srcR;
3789 Bool immR = toBool(srcR->tag == Mrh_Imm);
3790 UInt r_dst = iregNo(i->Min.Alu.dst, mode64);
3791 UInt r_srcL = iregNo(i->Min.Alu.srcL, mode64);
3792 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
3793 mode64);
3794 switch (i->Min.Alu.op) {
3795 /* Malu_ADD, Malu_SUB, Malu_AND, Malu_OR, Malu_NOR, Malu_XOR, Malu_SLT */
3796 case Malu_ADD:
3797 if (immR) {
3798 vassert(srcR->Mrh.Imm.syned);
3799 /* addiu */
3800 p = mkFormI(p, 9, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
3801 } else {
3802 /* addu */
3803 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 33);
3805 break;
3806 case Malu_SUB:
3807 if (immR) {
3808 /* addiu , but with negated imm */
3809 vassert(srcR->Mrh.Imm.syned);
3810 vassert(srcR->Mrh.Imm.imm16 != 0x8000);
3811 p = mkFormI(p, 9, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
3812 } else {
3813 /* subu */
3814 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 35);
3816 break;
3817 case Malu_AND:
3818 if (immR) {
3819 /* andi */
3820 vassert(!srcR->Mrh.Imm.syned);
3821 p = mkFormI(p, 12, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
3822 } else {
3823 /* and */
3824 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 36);
3826 break;
3827 case Malu_OR:
3828 if (immR) {
3829 /* ori */
3830 vassert(!srcR->Mrh.Imm.syned);
3831 p = mkFormI(p, 13, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
3832 } else {
3833 /* or */
3834 if (r_srcL == 33)
3835 /* MFHI */
3836 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
3837 else if (r_srcL == 34)
3838 /* MFLO */
3839 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
3840 else if (r_dst == 33)
3841 /* MTHI */
3842 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 17);
3843 else if (r_dst == 34)
3844 /* MTLO */
3845 p = mkFormR(p, 0, r_srcL, 0, 0, 0, 19);
3846 else
3847 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 37);
3849 break;
3850 case Malu_NOR:
3851 /* nor */
3852 vassert(!immR);
3853 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 39);
3854 break;
3855 case Malu_XOR:
3856 if (immR) {
3857 /* xori */
3858 vassert(!srcR->Mrh.Imm.syned);
3859 p = mkFormI(p, 14, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
3860 } else {
3861 /* xor */
3862 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
3864 break;
3865 case Malu_DADD:
3866 if (immR) {
3867 vassert(srcR->Mrh.Imm.syned);
3868 vassert(srcR->Mrh.Imm.imm16 != 0x8000);
3869 p = mkFormI(p, 25, r_srcL, r_dst, srcR->Mrh.Imm.imm16);
3870 } else {
3871 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 45);
3873 break;
3874 case Malu_DSUB:
3875 if (immR) {
3876 p = mkFormI(p, 25, r_srcL, r_dst, (-srcR->Mrh.Imm.imm16));
3877 } else {
3878 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 47);
3880 break;
3881 case Malu_SLT:
3882 if (immR) {
3883 goto bad;
3884 } else {
3885 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
3887 break;
3889 default:
3890 goto bad;
3892 goto done;
3895 case Msa_MI10: {
3896 UInt v_reg = qregEnc(i->Min.MsaMi10.wd);
3897 UInt r_reg = iregNo(i->Min.MsaMi10.rs, mode64);
3898 p = mkFormMI10(p, 0x1E, i->Min.MsaMi10.s10, r_reg, v_reg, i->Min.MsaMi10.op,
3899 i->Min.MsaMi10.df);
3900 goto done;
3903 case Msa_ELM: {
3904 UInt v_src, v_dst;
3906 switch (i->Min.MsaElm.op) {
3907 case MSA_INSERT:
3908 v_src = iregNo(i->Min.MsaElm.ws, mode64);
3909 v_dst = qregEnc(i->Min.MsaElm.wd);
3910 break;
3912 case MSA_COPY_S:
3913 case MSA_COPY_U:
3914 v_src = qregEnc(i->Min.MsaElm.ws);
3915 v_dst = iregNo(i->Min.MsaElm.wd, mode64);
3916 break;
3918 case MSA_CTCMSA:
3919 v_src = iregNo(i->Min.MsaElm.ws, mode64);
3920 v_dst = 1;
3921 break;
3923 case MSA_CFCMSA:
3924 v_src = 1;
3925 v_dst = iregNo(i->Min.MsaElm.wd, mode64);
3926 break;
3928 default:
3929 v_src = qregEnc(i->Min.MsaElm.ws);
3930 v_dst = qregEnc(i->Min.MsaElm.wd);
3931 break;
3934 switch (i->Min.MsaElm.op) {
3935 case MSA_MOVE:
3936 case MSA_CTCMSA:
3937 case MSA_CFCMSA:
3938 p = mkFormELM(p, 0x1E, 0, i->Min.MsaElm.op, v_src, v_dst, 25);
3939 break;
3941 default:
3942 p = mkFormELM(p, 0x1E, i->Min.MsaElm.op, i->Min.MsaElm.dfn, v_src, v_dst, 25);
3943 break;
3946 goto done;
3949 case Msa_3R: {
3950 UInt v_wt;
3952 switch (i->Min.Msa3R.op) {
3953 case MSA_SLD:
3954 case MSA_SPLAT:
3955 v_wt = iregNo(i->Min.Msa3R.wt, mode64);
3956 break;
3958 default:
3959 v_wt = qregEnc(i->Min.Msa3R.wt);
3960 break;
3963 UInt v_ws = qregEnc(i->Min.Msa3R.ws);
3964 UInt v_wd = qregEnc(i->Min.Msa3R.wd);
3965 p = mkForm3R(p, i->Min.Msa3R.op, i->Min.Msa3R.df, v_wd, v_ws, v_wt);
3966 goto done;
3969 case Msa_2R: {
3970 UInt v_src;
3971 UInt v_dst;
3973 switch (i->Min.Msa2R.op) {
3974 case MSA_FILL:
3975 v_src = iregNo(i->Min.Msa2R.ws, mode64);
3976 v_dst = qregEnc(i->Min.Msa2R.wd);
3977 break;
3979 default:
3980 v_src = qregEnc(i->Min.Msa2R.ws);
3981 v_dst = qregEnc(i->Min.Msa2R.wd);
3982 break;
3985 p = mkForm2R(p, 0x1E, i->Min.Msa2R.op, i->Min.Msa2R.df, v_src, v_dst, 0x1E);
3986 goto done;
3989 case Msa_2RF: {
3990 UInt v_src = qregEnc(i->Min.Msa2RF.ws);
3991 UInt v_dst = qregEnc(i->Min.Msa2RF.wd);
3992 p = mkForm2RF(p, i->Min.Msa2RF.op, i->Min.Msa2RF.df, v_src, v_dst, 0x1E);
3993 goto done;
3996 case Msa_VEC: {
3997 UInt v_wt = qregEnc(i->Min.MsaVec.wt);
3998 UInt v_ws = qregEnc(i->Min.MsaVec.ws);
3999 UInt v_wd = qregEnc(i->Min.MsaVec.wd);
4000 p = mkFormVEC(p, i->Min.MsaVec.op, v_wt, v_ws, v_wd);
4001 goto done;
4004 case Msa_BIT: {
4005 UInt v_ws = qregEnc(i->Min.MsaBit.ws);
4006 UInt v_wd = qregEnc(i->Min.MsaBit.wd);
4007 p = mkFormBIT(p, i->Min.MsaBit.op, i->Min.MsaBit.df, i->Min.MsaBit.ms, v_ws,
4008 v_wd);
4009 goto done;
4012 case Msa_3RF: {
4013 UInt v_wt = qregEnc(i->Min.Msa3RF.wt);
4014 UInt v_ws = qregEnc(i->Min.Msa3RF.ws);
4015 UInt v_wd = qregEnc(i->Min.Msa3RF.wd);
4016 p = mkForm3RF(p, i->Min.Msa3RF.op, i->Min.Msa3RF.df, v_wd, v_ws, v_wt);
4017 goto done;
4020 case Min_Shft: {
4021 MIPSRH *srcR = i->Min.Shft.srcR;
4022 Bool sz32 = i->Min.Shft.sz32;
4023 Bool immR = toBool(srcR->tag == Mrh_Imm);
4024 UInt r_dst = iregNo(i->Min.Shft.dst, mode64);
4025 UInt r_srcL = iregNo(i->Min.Shft.srcL, mode64);
4026 UInt r_srcR = immR ? (-1) /*bogus */ : iregNo(srcR->Mrh.Reg.reg,
4027 mode64);
4028 if (!mode64)
4029 vassert(sz32);
4030 switch (i->Min.Shft.op) {
4031 case Mshft_SLL:
4032 if (sz32) {
4033 if (immR) {
4034 UInt n = srcR->Mrh.Imm.imm16;
4035 vassert(n >= 0 && n < 32);
4036 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 0);
4037 } else {
4038 /* shift variable */
4039 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 4);
4041 } else {
4042 if (immR) {
4043 UInt n = srcR->Mrh.Imm.imm16;
4044 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
4045 if (n >= 0 && n < 32) {
4046 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 56);
4047 } else {
4048 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 60);
4050 } else {
4051 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 20);
4054 break;
4056 case Mshft_SRL:
4057 if (sz32) {
4058 /* SRL, SRLV */
4059 if (immR) {
4060 UInt n = srcR->Mrh.Imm.imm16;
4061 vassert(n >= 0 && n < 32);
4062 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 2);
4063 } else {
4064 /* shift variable */
4065 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 6);
4067 } else {
4068 /* DSRL, DSRL32, DSRLV */
4069 if (immR) {
4070 UInt n = srcR->Mrh.Imm.imm16;
4071 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
4072 if (n >= 0 && n < 32) {
4073 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 58);
4074 } else {
4075 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 62);
4077 } else {
4078 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 22);
4081 break;
4083 case Mshft_SRA:
4084 if (sz32) {
4085 /* SRA, SRAV */
4086 if (immR) {
4087 UInt n = srcR->Mrh.Imm.imm16;
4088 vassert(n >= 0 && n < 32);
4089 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 3);
4090 } else {
4091 /* shift variable */
4092 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 7);
4094 } else {
4095 /* DSRA, DSRA32, DSRAV */
4096 if (immR) {
4097 UInt n = srcR->Mrh.Imm.imm16;
4098 vassert((n >= 0 && n < 32) || (n > 31 && n < 64));
4099 if (n >= 0 && n < 32) {
4100 p = mkFormS(p, 0, r_dst, 0, r_srcL, n, 59);
4101 } else {
4102 p = mkFormS(p, 0, r_dst, 0, r_srcL, n - 32, 63);
4104 } else {
4105 p = mkFormS(p, 0, r_dst, r_srcR, r_srcL, 0, 23);
4108 break;
4110 default:
4111 goto bad;
4114 goto done;
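/* For the 64-bit immediate shifts above: the 5-bit sa field only holds
   0..31, so shift amounts of 32..63 use the "+32" opcode variants
   (DSLL32/DSRL32/DSRA32, func 60/62/63) with sa = n - 32, while variable
   shifts go through DSLLV/DSRLV/DSRAV. */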
4117 case Min_Rotx: {
4118 UInt r_dst = iregNo(i->Min.Rotx.rd, mode64);
4119 UInt r_src = iregNo(i->Min.Rotx.rt, mode64);
4120 switch(i->Min.Rotx.op) {
4121 case Rotx32:
4122 p = mkFormR(p, 31, 0, r_src, r_dst, 0, 32);
4123 break;
4124 case Rotx64:
4125 p = mkFormR(p, 31, 0, r_src, r_dst, 0, 36);
4126 break;
4128 goto done;
4130 case Min_Unary: {
4131 UInt r_dst = iregNo(i->Min.Unary.dst, mode64);
4132 UInt r_src = iregNo(i->Min.Unary.src, mode64);
4134 switch (i->Min.Unary.op) {
4135 /* Mun_CLO, Mun_CLZ, Mun_NOP, Mun_DCLO, Mun_DCLZ */
4136 #if (__mips_isa_rev >= 6)
4137 case Mun_CLO: /* clo */
4138 p = mkFormR(p, 0, r_src, 0, r_dst, 1, 17);
4139 break;
4140 case Mun_CLZ: /* clz */
4141 p = mkFormR(p, 0, r_src, 0, r_dst, 1, 16);
4142 break;
4143 case Mun_DCLO: /* clo */
4144 p = mkFormR(p, 0, r_src, 0, r_dst, 1, 19);
4145 break;
4146 case Mun_DCLZ: /* clz */
4147 p = mkFormR(p, 0, r_src, 0, r_dst, 1, 18);
4148 break;
4149 #else
4150 case Mun_CLO: /* clo */
4151 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 33);
4152 break;
4153 case Mun_CLZ: /* clz */
4154 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 32);
4155 break;
4156 case Mun_DCLO: /* clo */
4157 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 37);
4158 break;
4159 case Mun_DCLZ: /* clz */
4160 p = mkFormR(p, 28, r_src, r_dst , r_dst, 0, 36);
4161 break;
4162 #endif
4163 case Mun_NOP: /* nop (sll r0,r0,0) */
4164 p = mkFormR(p, 0, 0, 0, 0, 0, 0);
4165 break;
4167 goto done;
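/* Pre-R6, CLO/CLZ/DCLO/DCLZ live in SPECIAL2 (opcode 28) and the
   architecture requires rt == rd, which is why r_dst is passed in both
   fields; from MIPSR6 they moved to SPECIAL (opcode 0) with sa = 1 and
   func 16..19, as selected by the __mips_isa_rev conditional above. */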
4170 case Min_Cmp: {
4171 UInt r_srcL = iregNo(i->Min.Cmp.srcL, mode64);
4172 UInt r_srcR = iregNo(i->Min.Cmp.srcR, mode64);
4173 UInt r_dst = iregNo(i->Min.Cmp.dst, mode64);
4175 switch (i->Min.Cmp.cond) {
4176 case MIPScc_EQ:
4177 /* xor r_dst, r_srcL, r_srcR
4178 sltiu r_dst, r_dst, 1 */
4179 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
4180 p = mkFormI(p, 11, r_dst, r_dst, 1);
4181 break;
4182 case MIPScc_NE:
4183 /* xor r_dst, r_srcL, r_srcR
4184 sltu r_dst, zero, r_dst */
4185 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 38);
4186 p = mkFormR(p, 0, 0, r_dst, r_dst, 0, 43);
4187 break;
4188 case MIPScc_LT:
4189 /* slt r_dst, r_srcL, r_srcR */
4190 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 42);
4191 break;
4192 case MIPScc_LO:
4193 /* sltu r_dst, r_srcL, r_srcR */
4194 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, 0, 43);
4195 break;
4196 case MIPScc_LE:
4197 /* slt r_dst, r_srcR, r_srcL
4198 xori r_dst, r_dst, 1 */
4199 p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 42);
4200 p = mkFormI(p, 14, r_dst, r_dst, 1);
4201 break;
4202 case MIPScc_LS:
4203 /* sltu r_dst, r_srcR, r_srcL
4204 xori r_dst, r_dst, 1 */
4205 p = mkFormR(p, 0, r_srcR, r_srcL, r_dst, 0, 43);
4206 p = mkFormI(p, 14, r_dst, r_dst, 1);
4207 break;
4208 default:
4209 goto bad;
4211 goto done;
4214 case Min_Mul: {
4215 UInt r_srcL = iregNo(i->Min.Mul.srcL, mode64);
4216 UInt r_srcR = iregNo(i->Min.Mul.srcR, mode64);
4217 UInt r_dst = iregNo(i->Min.Mul.dst, mode64);
4218 /* mul r_dst, r_srcL, r_srcR */
4219 p = mkFormR(p, 28, r_srcL, r_srcR, r_dst, 0, 2);
4220 goto done;
4223 case Min_Mult: {
4224 Bool syned = i->Min.Mult.syned;
4225 UInt r_srcL = iregNo(i->Min.Mult.srcL, mode64);
4226 UInt r_srcR = iregNo(i->Min.Mult.srcR, mode64);
4227 if (mode64) {
4228 if (syned)
4229 /* dmult r_srcL, r_srcR */
4230 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 28);
4231 else
4232 /* dmultu r_srcL, r_srcR */
4233 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 29);
4234 } else {
4235 if (syned)
4236 /* mult r_srcL, r_srcR */
4237 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 24);
4238 else
4239 /* multu r_srcL, r_srcR */
4240 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 25);
4242 goto done;
4245 case Min_Ext: {
4246 UInt r_src = iregNo(i->Min.Ext.src, mode64);
4247 UInt r_dst = iregNo(i->Min.Ext.dst, mode64);
4248 /* For now, only DEXT is implemented. */
4249 vassert(mode64);
4250 vassert(i->Min.Ext.pos < 32);
4251 vassert(i->Min.Ext.size > 0);
4252 vassert(i->Min.Ext.size <= 32);
4253 vassert(i->Min.Ext.size + i->Min.Ext.pos > 0);
4254 vassert(i->Min.Ext.size + i->Min.Ext.pos <= 63);
4255 /* DEXT r_dst, r_src, pos, size */
4256 p = mkFormR(p, 0x1F, r_src, r_dst,
4257 i->Min.Ext.size - 1, i->Min.Ext.pos, 3);
4258 goto done;
4261 case Min_Mulr6: {
4262 Bool syned = i->Min.Mulr6.syned;
4263 Bool sz32 = i->Min.Mulr6.sz32;
4264 UInt r_srcL = iregNo(i->Min.Mulr6.srcL, mode64);
4265 UInt r_srcR = iregNo(i->Min.Mulr6.srcR, mode64);
4266 UInt r_dst = iregNo(i->Min.Mulr6.dst, mode64);
4267 int low = i->Min.Mulr6.low?2:3;
4268 if (sz32) {
4269 if (syned)
4270 /* mul/muh */
4271 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 24);
4272 else
4273 /* mulu/muhu */
4274 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 25);
4275 } else {
4276 if (syned) /* DMUL/DMUH r_dst,r_srcL,r_srcR */
4277 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 28);
4278 else /* DMULU/DMUHU r_dst,r_srcL,r_srcR */
4279 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, low, 29);
4281 goto done;
4284 case Min_Macc: {
4285 Bool syned = i->Min.Macc.syned;
4286 UInt r_srcL = iregNo(i->Min.Macc.srcL, mode64);
4287 UInt r_srcR = iregNo(i->Min.Macc.srcR, mode64);
4289 if (syned) {
4290 switch (i->Min.Macc.op) {
4291 case Macc_ADD:
4292 /* madd */
4293 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0, 0);
4294 break;
4295 case Macc_SUB:
4296 /* msub */
4297 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
4298 4);
4299 break;
4300 default:
4301 goto bad;
4303 } else {
4304 switch (i->Min.Macc.op) {
4305 case Macc_ADD:
4306 /* maddu */
4307 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
4308 1);
4309 break;
4310 case Macc_SUB:
4311 /* msubu */
4312 p = mkFormR(p, 28, r_srcL, r_srcR, 0, 0,
4313 5);
4314 break;
4315 default:
4316 goto bad;
4320 goto done;
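/* The SPECIAL2 (opcode 28) func values used above are madd = 0, maddu = 1,
   msub = 4 and msubu = 5; all four accumulate into HI/LO rather than a
   general register, hence rd = 0 and sa = 0 in every call. */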
4323 case Min_Div: {
4324 Bool syned = i->Min.Div.syned;
4325 Bool sz32 = i->Min.Div.sz32;
4326 UInt r_srcL = iregNo(i->Min.Div.srcL, mode64);
4327 UInt r_srcR = iregNo(i->Min.Div.srcR, mode64);
4328 if (sz32) {
4329 if (syned) {
4330 /* div */
4331 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 26);
4332 } else
4333 /* divu */
4334 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 27);
4335 goto done;
4336 } else {
4337 if (syned) {
4338 /* ddiv */
4339 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 30);
4340 } else
4341 /* ddivu */
4342 p = mkFormR(p, 0, r_srcL, r_srcR, 0, 0, 31);
4343 goto done;
4346 case Min_Divr6: {
4347 Bool syned = i->Min.Divr6.syned;
4348 Bool sz32 = i->Min.Divr6.sz32;
4349 UInt r_srcL = iregNo(i->Min.Divr6.srcL, mode64);
4350 UInt r_srcR = iregNo(i->Min.Divr6.srcR, mode64);
4351 UInt r_dst = iregNo(i->Min.Divr6.dst, mode64);
4352 int mod = i->Min.Divr6.mod?3:2;
4353 if (sz32) {
4354 if (syned)
4355 /* div/mod */
4356 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 26);
4357 else
4358 /* divu/modu */
4359 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 27);
4360 } else {
4361 if (syned) /* DDIV/DMOD r_dst,r_srcL,r_srcR */
4362 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 30);
4363 else /* DDIVU/DMODU r_dst,r_srcL,r_srcR */
4364 p = mkFormR(p, 0, r_srcL, r_srcR, r_dst, mod, 31);
4366 goto done;
4368 case Min_Mthi: {
4369 UInt r_src = iregNo(i->Min.MtHL.src, mode64);
4370 p = mkFormR(p, 0, r_src, 0, 0, 0, 17);
4371 goto done;
4374 case Min_Mtlo: {
4375 UInt r_src = iregNo(i->Min.MtHL.src, mode64);
4376 p = mkFormR(p, 0, r_src, 0, 0, 0, 19);
4377 goto done;
4380 case Min_Mfhi: {
4381 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
4382 p = mkFormR(p, 0, 0, 0, r_dst, 0, 16);
4383 goto done;
4386 case Min_Mflo: {
4387 UInt r_dst = iregNo(i->Min.MfHL.dst, mode64);
4388 p = mkFormR(p, 0, 0, 0, r_dst, 0, 18);
4389 goto done;
4392 case Min_MtFCSR: {
4393 UInt r_src = iregNo(i->Min.MtFCSR.src, mode64);
4394 /* ctc1 */
4395 p = mkFormR(p, 17, 6, r_src, 31, 0, 0);
4396 goto done;
4399 case Min_MfFCSR: {
4400 UInt r_dst = iregNo(i->Min.MfFCSR.dst, mode64);
4401 /* cfc1 */
4402 p = mkFormR(p, 17, 2, r_dst, 31, 0, 0);
4403 goto done;
4406 case Min_Call: {
4407 if (i->Min.Call.cond != MIPScc_AL
4408 && i->Min.Call.rloc.pri != RLPri_None) {
4409 /* The call might not happen (it isn't unconditional) and
4410 it returns a result. In this case we will need to
4411 generate a control flow diamond to put 0x555..555 in
4412 the return register(s) in the case where the call
4413 doesn't happen. If this ever becomes necessary, maybe
4414 copy code from the ARM equivalent. Until that day,
4415 just give up. */
4416 goto bad;
4418 MIPSCondCode cond = i->Min.Call.cond;
4419 UInt r_dst = 25; /* using %r25 as address temporary -
4420 see getRegUsage_MIPSInstr */
4422 /* jump over the following insns if condition does not hold */
4423 if (cond != MIPScc_AL) {
4424 /* jmp fwds if !condition */
4425 /* don't know how many bytes to jump over yet...
4426 make space for a jump instruction + nop!!! and fill in later. */
4427 ptmp = p; /* fill in this bit later */
4428 p += 8; /* p += 8 */
4431 if (!mode64) {
4432 /* addiu $29, $29, -16 */
4433 p = mkFormI(p, 9, 29, 29, 0xFFF0);
4436 /* load target to r_dst; p += 4|8 */
4437 p = mkLoadImm(p, r_dst, i->Min.Call.target, mode64);
4439 /* jalr r_dst */
4440 p = mkFormR(p, 0, r_dst, 0, 31, 0, 9); /* p += 4 */
4441 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
4443 if (!mode64) {
4444 /* addiu $29, $29, 16 */
4445 p = mkFormI(p, 9, 29, 29, 0x0010);
4448 /* Fix up the conditional jump, if there was one. */
4449 if (cond != MIPScc_AL) {
4450 UInt r_src = iregNo(i->Min.Call.src, mode64);
4451 Int delta = p - ptmp;
4453 vassert(delta >= 20 && delta <= 32);
4454 /* blez r_src, delta/4-1
4455 nop */
4456 ptmp = mkFormI(ptmp, 6, r_src, 0, delta / 4 - 1);
4457 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
4459 goto done;
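/* Notes on the call sequence above: $25 (t9) holds the callee address,
   which also matches the usual MIPS PIC convention that t9 points at the
   function being entered.  In 32-bit mode the addiu $29,$29,-16 / +16 pair
   reserves the 16-byte argument save area the o32 convention expects the
   caller to provide.  For a conditional call, the blez planted at ptmp
   skips the whole sequence when the guard register holds 0. */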
4462 case Min_XDirect: {
4463 /* NB: what goes on here has to be very closely coordinated
4464 with the chainXDirect_MIPS and unchainXDirect_MIPS below. */
4465 /* We're generating chain-me requests here, so we need to be
4466 sure this is actually allowed -- no-redir translations
4467 can't use chain-me's. Hence: */
4468 vassert(disp_cp_chain_me_to_slowEP != NULL);
4469 vassert(disp_cp_chain_me_to_fastEP != NULL);
4471 /* Use ptmp for backpatching conditional jumps. */
4472 ptmp = NULL;
4474 /* First off, if this is conditional, create a conditional
4475 jump over the rest of it. Or at least, leave a space for
4476 it that we will shortly fill in. */
4477 if (i->Min.XDirect.cond != MIPScc_AL) {
4478 vassert(i->Min.XDirect.cond != MIPScc_NV);
4479 ptmp = p;
4480 p += 12;
4483 /* Update the guest PC. */
4484 /* move r9, dstGA */
4485 /* sw/sd r9, amPC */
4486 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, (ULong)i->Min.XDirect.dstGA,
4487 mode64);
4488 p = do_load_or_store_machine_word(p, False /*!isLoad*/ , /*r*/ 9,
4489 i->Min.XDirect.amPC, mode64);
4491 /* --- FIRST PATCHABLE BYTE follows --- */
4492 /* VG_(disp_cp_chain_me_to_{slowEP,fastEP}) (where we're
4493 calling to) backs up the return address, so as to find the
4494 address of the first patchable byte. So: don't change the
4495 number of instructions (3) below. */
4496 /* move r9, VG_(disp_cp_chain_me_to_{slowEP,fastEP}) */
4497 /* jr r9 */
4498 const void* disp_cp_chain_me
4499 = i->Min.XDirect.toFastEP ? disp_cp_chain_me_to_fastEP
4500 : disp_cp_chain_me_to_slowEP;
4501 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
4502 (Addr)disp_cp_chain_me, mode64);
4503 /* jalr $9 */
4504 /* nop */
4505 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
4506 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
4507 /* --- END of PATCHABLE BYTES --- */
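/* The two words just emitted are the fixed encodings 0x0120F809 (jalr $31, $9) and 0x00000000 (nop); chainXDirect_MIPS and unchainXDirect_MIPS below check for exactly these words before patching. */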
4509 /* Fix up the conditional jump, if there was one. */
4510 if (i->Min.XDirect.cond != MIPScc_AL) {
4511 Int delta = p - ptmp;
4512 delta = delta / 4 - 3;
4513 vassert(delta > 0 && delta < 40);
4515 /* lw $9, COND_OFFSET(GuestSP)
4516 beq $9, $0, 2
4517 nop */
4518 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
4519 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
4520 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
4522 goto done;
4525 case Min_XIndir: {
4526 /* We're generating transfers that could lead indirectly to a
4527 chain-me, so we need to be sure this is actually allowed --
4528 no-redir translations are not allowed to reach normal
4529 translations without going through the scheduler. That means
4530 no XDirects or XIndirs out from no-redir translations.
4531 Hence: */
4532 vassert(disp_cp_xindir != NULL);
4534 /* Use ptmp for backpatching conditional jumps. */
4535 ptmp = NULL;
4537 /* First off, if this is conditional, create a conditional
4538 jump over the rest of it. */
4539 if (i->Min.XIndir.cond != MIPScc_AL) {
4540 vassert(i->Min.XIndir.cond != MIPScc_NV);
4541 ptmp = p;
4542 p += 12;
4545 /* Update the guest PC. */
4546 /* sw/sd r-dstGA, amPC */
4547 p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
4548 iregNo(i->Min.XIndir.dstGA, mode64),
4549 i->Min.XIndir.amPC, mode64);
4551 /* move r9, VG_(disp_cp_xindir) */
4552 /* jalr r9 */
4553 /* nop */
4554 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
4555 (Addr)disp_cp_xindir, mode64);
4556 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
4557 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
4559 /* Fix up the conditional jump, if there was one. */
4560 if (i->Min.XIndir.cond != MIPScc_AL) {
4561 Int delta = p - ptmp;
4562 delta = delta / 4 - 3;
4563 vassert(delta > 0 && delta < 40);
4565 /* lw $9, COND_OFFSET($GuestSP)
4566 beq $9, $0, 2
4567 nop */
4568 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
4569 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
4570 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
4572 goto done;
4575 case Min_XAssisted: {
4576 /* First off, if this is conditional, create a conditional jump
4577 over the rest of it. Or at least, leave a space for it that
4578 we will shortly fill in. */
4579 ptmp = NULL;
4580 if (i->Min.XAssisted.cond != MIPScc_AL) {
4581 vassert(i->Min.XAssisted.cond != MIPScc_NV);
4582 ptmp = p;
4583 p += 12;
4586 /* Update the guest PC. */
4587 /* sw/sd r-dstGA, amPC */
4588 p = do_load_or_store_machine_word(p, False /*!isLoad*/ ,
4589 iregNo(i->Min.XAssisted.dstGA, mode64),
4590 i->Min.XAssisted.amPC, mode64);
4592 /* imm32/64 GuestSP, $magic_number -- the TRC value is passed to the dispatcher in the guest-state-pointer register. */
4593 UInt trcval = 0;
4594 switch (i->Min.XAssisted.jk) {
4595 case Ijk_ClientReq: trcval = VEX_TRC_JMP_CLIENTREQ; break;
4596 case Ijk_Sys_syscall: trcval = VEX_TRC_JMP_SYS_SYSCALL; break;
4597 /* case Ijk_Sys_int128: trcval = VEX_TRC_JMP_SYS_INT128; break; */
4598 case Ijk_Yield: trcval = VEX_TRC_JMP_YIELD; break;
4599 case Ijk_EmWarn: trcval = VEX_TRC_JMP_EMWARN; break;
4600 case Ijk_EmFail: trcval = VEX_TRC_JMP_EMFAIL; break;
4601 /* case Ijk_MapFail: trcval = VEX_TRC_JMP_MAPFAIL; break; */
4602 case Ijk_NoDecode: trcval = VEX_TRC_JMP_NODECODE; break;
4603 case Ijk_InvalICache: trcval = VEX_TRC_JMP_INVALICACHE; break;
4604 case Ijk_NoRedir: trcval = VEX_TRC_JMP_NOREDIR; break;
4605 case Ijk_SigILL: trcval = VEX_TRC_JMP_SIGILL; break;
4606 case Ijk_SigTRAP: trcval = VEX_TRC_JMP_SIGTRAP; break;
4607 /* case Ijk_SigSEGV: trcval = VEX_TRC_JMP_SIGSEGV; break; */
4608 case Ijk_SigBUS: trcval = VEX_TRC_JMP_SIGBUS; break;
4609 case Ijk_SigFPE_IntDiv: trcval = VEX_TRC_JMP_SIGFPE_INTDIV; break;
4610 case Ijk_SigFPE_IntOvf: trcval = VEX_TRC_JMP_SIGFPE_INTOVF; break;
4611 case Ijk_Boring: trcval = VEX_TRC_JMP_BORING; break;
4612 /* We don't expect to see the following being assisted.
4613 case Ijk_Ret:
4614 case Ijk_Call:
4615 fallthrough */
4616 default:
4617 ppIRJumpKind(i->Min.XAssisted.jk);
4618 vpanic("emit_MIPSInstr.Min_XAssisted: unexpected jump kind");
4620 vassert(trcval != 0);
4621 p = mkLoadImm_EXACTLY2or6(p, /*r*/ GuestSP, trcval, mode64);
4623 /* move r9, VG_(disp_cp_xassisted) */
4624 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
4625 (ULong)(Addr)disp_cp_xassisted, mode64);
4626 /* jalr $9
4627 nop */
4628 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
4629 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
4631 /* Fix up the conditional jump, if there was one. */
4632 if (i->Min.XAssisted.cond != MIPScc_AL) {
4633 Int delta = p - ptmp;
4634 delta = delta / 4 - 3;
4635 vassert(delta > 0 && delta < 40);
4637 /* lw $9, COND_OFFSET($GuestSP)
4638 beq $9, $0, 2
4639 nop */
4640 ptmp = mkFormI(ptmp, 35, GuestSP, 9, COND_OFFSET(mode64));
4641 ptmp = mkFormI(ptmp, 4, 0, 9, (delta));
4642 mkFormR(ptmp, 0, 0, 0, 0, 0, 0);
4644 goto done;
4647 case Min_Load: {
4648 MIPSAMode *am_addr = i->Min.Load.src;
4649 if (am_addr->tag == Mam_IR) {
4650 UInt r_dst = iregNo(i->Min.Load.dst, mode64);
4651 UInt opc, sz = i->Min.Load.sz;
4652 if (mode64 && (sz == 4 || sz == 8)) {
4653 /* should be guaranteed to us by iselWordExpr_AMode */
4654 vassert(0 == (am_addr->Mam.IR.index & 3));
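/* Major opcodes selected below: 32 (0x20) = lb, 33 (0x21) = lh, 35 (0x23) = lw, 55 (0x37) = ld (64-bit only). */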
4656 switch (sz) {
4657 case 1:
4658 opc = 32;
4659 break;
4660 case 2:
4661 opc = 33;
4662 break;
4663 case 4:
4664 opc = 35;
4665 break;
4666 case 8:
4667 opc = 55;
4668 vassert(mode64);
4669 break;
4670 default:
4671 goto bad;
4674 p = doAMode_IR(p, opc, r_dst, am_addr, mode64);
4675 goto done;
4676 } else if (am_addr->tag == Mam_RR) {
4677 UInt r_dst = iregNo(i->Min.Load.dst, mode64);
4678 UInt opc, sz = i->Min.Load.sz;
4680 switch (sz) {
4681 case 1:
4682 opc = 32;
4683 break;
4684 case 2:
4685 opc = 33;
4686 break;
4687 case 4:
4688 opc = 35;
4689 break;
4690 case 8:
4691 opc = 55;
4692 vassert(mode64);
4693 break;
4694 default:
4695 goto bad;
4698 p = doAMode_RR(p, opc, r_dst, am_addr, mode64);
4699 goto done;
4701 break;
4704 case Min_Store: {
4705 MIPSAMode *am_addr = i->Min.Store.dst;
4706 if (am_addr->tag == Mam_IR) {
4707 UInt r_src = iregNo(i->Min.Store.src, mode64);
4708 UInt opc, sz = i->Min.Store.sz;
4709 if (mode64 && (sz == 4 || sz == 8)) {
4710 /* should be guaranteed to us by iselWordExpr_AMode */
4711 vassert(0 == (am_addr->Mam.IR.index & 3));
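/* Major opcodes selected below: 40 (0x28) = sb, 41 (0x29) = sh, 43 (0x2B) = sw, 63 (0x3F) = sd (64-bit only). */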
4713 switch (sz) {
4714 case 1:
4715 opc = 40;
4716 break;
4717 case 2:
4718 opc = 41;
4719 break;
4720 case 4:
4721 opc = 43;
4722 break;
4723 case 8:
4724 vassert(mode64);
4725 opc = 63;
4726 break;
4727 default:
4728 goto bad;
4731 p = doAMode_IR(p, opc, r_src, am_addr, mode64);
4732 goto done;
4733 } else if (am_addr->tag == Mam_RR) {
4734 UInt r_src = iregNo(i->Min.Store.src, mode64);
4735 UInt opc, sz = i->Min.Store.sz;
4737 switch (sz) {
4738 case 1:
4739 opc = 40;
4740 break;
4741 case 2:
4742 opc = 41;
4743 break;
4744 case 4:
4745 opc = 43;
4746 break;
4747 case 8:
4748 vassert(mode64);
4749 opc = 63;
4750 break;
4751 default:
4752 goto bad;
4755 p = doAMode_RR(p, opc, r_src, am_addr, mode64);
4756 goto done;
4758 break;
4760 case Min_LoadL: {
4761 MIPSAMode *am_addr = i->Min.LoadL.src;
4762 UInt r_src = iregNo(am_addr->Mam.IR.base, mode64);
4763 UInt idx = am_addr->Mam.IR.index;
4764 UInt r_dst = iregNo(i->Min.LoadL.dst, mode64);
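/* Pre-r6, ll/lld are major opcodes 0x30/0x34 with a 16-bit offset. From r6 they are SPECIAL3 (0x1F) encodings with a 9-bit offset in bits [15:7] and function bits 0x36/0x37 in [5:0], hence the (idx << 7) & 0xff80 packing; Min_StoreC below uses the same scheme with sc/scd function bits 0x26/0x27. */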
4765 #if (__mips_isa_rev >= 6)
4766 if (i->Min.LoadL.sz == 4)
4767 p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x36);
4768 else
4769 p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x37);
4770 #else
4771 if (i->Min.LoadL.sz == 4)
4772 p = mkFormI(p, 0x30, r_src, r_dst, idx);
4773 else
4774 p = mkFormI(p, 0x34, r_src, r_dst, idx);
4775 #endif
4776 goto done;
4778 case Min_StoreC: {
4779 MIPSAMode *am_addr = i->Min.StoreC.dst;
4780 UInt r_src = iregNo(i->Min.StoreC.src, mode64);
4781 UInt idx = am_addr->Mam.IR.index;
4782 UInt r_dst = iregNo(am_addr->Mam.IR.base, mode64);
4783 #if (__mips_isa_rev >= 6)
4784 if (i->Min.StoreC.sz == 4)
4785 p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x26);
4786 else
4787 p = mkFormI(p, 0x1F, r_src, r_dst, ((idx << 7) & 0xff80) | 0x27);
4788 #else
4789 if (i->Min.StoreC.sz == 4)
4790 p = mkFormI(p, 0x38, r_dst, r_src, idx);
4791 else
4792 p = mkFormI(p, 0x3C, r_dst, r_src, idx);
4793 #endif
4794 goto done;
4796 case Min_Cas: {
4797 if (i->Min.Cas.sz != 8 && i->Min.Cas.sz != 4)
4798 goto bad;
4799 UInt old = iregNo(i->Min.Cas.old, mode64);
4800 UInt addr = iregNo(i->Min.Cas.addr, mode64);
4801 UInt expd = iregNo(i->Min.Cas.expd, mode64);
4802 UInt data = iregNo(i->Min.Cas.data, mode64);
4803 Bool sz8 = toBool(i->Min.Cas.sz == 8);
/*
4806 * ll(d) old, 0(addr)
4807 * bne old, expd, end
4808 * nop
4809 * (d)addiu old, old, 1
4810 * sc(d) data, 0(addr)
4811 * movn old, expd, data
4812 * end:
*/
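/* The extra (d)addiu makes 'old' differ from 'expd' before the sc: if the sc fails, 'old' stays at expd+1 and the caller sees the CAS as failed, while a successful sc restores old = expd (via movn pre-r6, or the beqzc/or pair on r6, which skips the copy only when the sc result is 0). */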
4814 #if (__mips_isa_rev >= 6)
4815 // ll(d) old, 0(addr)
4816 p = mkFormI(p, 0x1F, addr, old, sz8? 0x37: 0x36);
4817 // bne old, expd, end
4818 p = mkFormI(p, 5, old, expd, 5);
4819 #else
4820 // ll(d) old, 0(addr)
4821 p = mkFormI(p, sz8 ? 0x34 : 0x30, addr, old, 0);
4822 // bne old, expd, end
4823 p = mkFormI(p, 5, old, expd, 4);
4824 #endif
4825 // nop
4826 p = mkFormR(p, 0, 0, 0, 0, 0, 0);
4827 // (d)addiu old, old, 1
4828 p = mkFormI(p, sz8 ? 25 : 9, old, old, 4);
4830 #if (__mips_isa_rev >= 6)
4831 // sc(d) data, 0(addr)
4832 p = mkFormI(p, 0x1F, addr, data, sz8? 0x27: 0x26);
4833 //beqzc
4834 p = mkFormI(p, 0x36, data, 0, 1);
4835 //or
4836 p = mkFormR(p, 0, 0, expd, old, 0, 0x25 );
4837 #else
4838 // sc(d) data, 0(addr)
4839 p = mkFormI(p, sz8 ? 0x3C : 0x38, addr, data, 0);
4840 // movn old, expd, data
4841 p = mkFormR(p, 0, expd, data, old, 0, 0xb);
4842 #endif
4844 goto done;
4846 case Min_RdWrLR: {
4847 UInt reg = iregNo(i->Min.RdWrLR.gpr, mode64);
4848 Bool wrLR = i->Min.RdWrLR.wrLR;
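/* Register 31 is the MIPS link register $ra: wrLR copies the GPR into $ra, otherwise $ra is copied into the GPR. */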
4849 if (wrLR)
4850 p = mkMoveReg(p, 31, reg);
4851 else
4852 p = mkMoveReg(p, reg, 31);
4853 goto done;
4856 /* Floating point */
4857 case Min_FpLdSt: {
4858 MIPSAMode *am_addr = i->Min.FpLdSt.addr;
4859 UChar sz = i->Min.FpLdSt.sz;
4860 vassert(sz == 4 || sz == 8);
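/* COP1 load/store major opcodes used below: 0x31 = lwc1, 0x39 = swc1 (single precision); 0x35 = ldc1, 0x3d = sdc1 (double precision). */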
4861 if (sz == 4) {
4862 UInt f_reg = fregNo(i->Min.FpLdSt.reg, mode64);
4863 if (i->Min.FpLdSt.isLoad) {
4864 if (am_addr->tag == Mam_IR)
4865 p = doAMode_IR(p, 0x31, f_reg, am_addr, mode64);
4866 else if (am_addr->tag == Mam_RR)
4867 p = doAMode_RR(p, 0x31, f_reg, am_addr, mode64);
4868 } else {
4869 if (am_addr->tag == Mam_IR)
4870 p = doAMode_IR(p, 0x39, f_reg, am_addr, mode64);
4871 else if (am_addr->tag == Mam_RR)
4872 p = doAMode_RR(p, 0x39, f_reg, am_addr, mode64);
4874 } else if (sz == 8) {
4875 UInt f_reg = dregNo(i->Min.FpLdSt.reg);
4876 if (i->Min.FpLdSt.isLoad) {
4877 if (am_addr->tag == Mam_IR) {
4878 p = doAMode_IR(p, 0x35, f_reg, am_addr, mode64);
4879 } else if (am_addr->tag == Mam_RR) {
4880 p = doAMode_RR(p, 0x35, f_reg, am_addr, mode64);
4882 } else {
4883 if (am_addr->tag == Mam_IR) {
4884 p = doAMode_IR(p, 0x3d, f_reg, am_addr, mode64);
4885 } else if (am_addr->tag == Mam_RR) {
4886 p = doAMode_RR(p, 0x3d, f_reg, am_addr, mode64);
4890 goto done;
4893 case Min_FpUnary: {
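/* These are COP1 (major opcode 0x11) arithmetic forms: the fmt field is 0x10 for single and 0x11 for double, and the funct values used below are 0x6 = mov, 0x5 = abs, 0x7 = neg, 0x4 = sqrt. */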
4894 switch (i->Min.FpUnary.op) {
4895 case Mfp_MOVS: { /* FP move */
4896 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
4897 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
4898 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x6);
4899 break;
4901 case Mfp_MOVD: { /* FP move */
4902 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
4903 UInt fr_src = dregNo(i->Min.FpUnary.src);
4904 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x6);
4905 break;
4907 case Mfp_ABSS: { /* ABS.S */
4908 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
4909 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
4910 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x5);
4911 break;
4913 case Mfp_ABSD: { /* ABS.D */
4914 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
4915 UInt fr_src = dregNo(i->Min.FpUnary.src);
4916 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x5);
4917 break;
4919 case Mfp_NEGS: { /* NEG.S */
4920 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
4921 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
4922 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x7);
4923 break;
4925 case Mfp_NEGD: { /* NEG.D */
4926 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
4927 UInt fr_src = dregNo(i->Min.FpUnary.src);
4928 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x7);
4929 break;
4931 case Mfp_SQRTS: { /* SQRT.S */
4932 UInt fr_dst = fregNo(i->Min.FpUnary.dst, mode64);
4933 UInt fr_src = fregNo(i->Min.FpUnary.src, mode64);
4934 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x04);
4935 break;
4937 case Mfp_SQRTD: { /* SQRT.D */
4938 UInt fr_dst = dregNo(i->Min.FpUnary.dst);
4939 UInt fr_src = dregNo(i->Min.FpUnary.src);
4940 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x04);
4941 break;
4943 default:
4944 goto bad;
4946 goto done;
4949 case Min_FpBinary: {
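/* COP1 arithmetic functs used below: 0 = add, 1 = sub, 2 = mul, 3 = div; fmt 0x10 = single, 0x11 = double. */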
4950 switch (i->Min.FpBinary.op) {
4951 case Mfp_ADDS: {
4952 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
4953 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
4954 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
4955 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 0);
4956 break;
4958 case Mfp_SUBS: {
4959 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
4960 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
4961 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
4962 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 1);
4963 break;
4965 case Mfp_MULS: {
4966 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
4967 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
4968 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
4969 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 2);
4970 break;
4972 case Mfp_DIVS: {
4973 UInt fr_dst = fregNo(i->Min.FpBinary.dst, mode64);
4974 UInt fr_srcL = fregNo(i->Min.FpBinary.srcL, mode64);
4975 UInt fr_srcR = fregNo(i->Min.FpBinary.srcR, mode64);
4976 p = mkFormR(p, 0x11, 0x10, fr_srcR, fr_srcL, fr_dst, 3);
4977 break;
4979 case Mfp_ADDD: {
4980 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
4981 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
4982 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
4983 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 0);
4984 break;
4986 case Mfp_SUBD: {
4987 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
4988 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
4989 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
4990 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 1);
4991 break;
4993 case Mfp_MULD: {
4994 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
4995 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
4996 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
4997 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 2);
4998 break;
5000 case Mfp_DIVD: {
5001 UInt fr_dst = dregNo(i->Min.FpBinary.dst);
5002 UInt fr_srcL = dregNo(i->Min.FpBinary.srcL);
5003 UInt fr_srcR = dregNo(i->Min.FpBinary.srcR);
5004 p = mkFormR(p, 0x11, 0x11, fr_srcR, fr_srcL, fr_dst, 3);
5005 break;
5007 default:
5008 goto bad;
5010 goto done;
5013 case Min_FpTernary: {
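/* Pre-r6 these map onto COP1X (major opcode 0x13) madd.s/madd.d/msub.s/msub.d (functs 0x20/0x21/0x28/0x29). r6 dropped COP1X, so the code first copies src1 into dst with mov.fmt and then uses maddf/msubf (functs 0x18/0x19), which accumulate into fd. */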
5014 switch (i->Min.FpTernary.op) {
5015 case Mfp_MADDS: {
5016 UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
5017 UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
5018 UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
5019 UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
5020 #if (__mips_isa_rev >= 6)
5021 p = mkFormR(p, 0x11, 0x10 , 0x0, fr_src1, fr_dst, 0x6);
5022 p = mkFormR(p, 0x11, 0x10, fr_src3, fr_src2, fr_dst, 0x18);
5023 #else
5024 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x20);
5025 #endif
5026 break;
5028 case Mfp_MADDD: {
5029 UInt fr_dst = dregNo(i->Min.FpTernary.dst);
5030 UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
5031 UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
5032 UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
5033 #if (__mips_isa_rev >= 6)
5034 p = mkFormR(p, 0x11, 0x11 , 0x0, fr_src1, fr_dst, 0x6);
5035 p = mkFormR(p, 0x11, 0x11, fr_src3, fr_src2, fr_dst, 0x18);
5036 #else
5037 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x21);
5038 #endif
5039 break;
5041 case Mfp_MSUBS: {
5042 UInt fr_dst = fregNo(i->Min.FpTernary.dst, mode64);
5043 UInt fr_src1 = fregNo(i->Min.FpTernary.src1, mode64);
5044 UInt fr_src2 = fregNo(i->Min.FpTernary.src2, mode64);
5045 UInt fr_src3 = fregNo(i->Min.FpTernary.src3, mode64);
5046 #if (__mips_isa_rev >= 6)
5047 p = mkFormR(p, 0x11, 0x10 , 0x0, fr_src1, fr_dst, 0x6);
5048 p = mkFormR(p, 0x11, 0x10, fr_src3, fr_src2, fr_dst, 0x19);
5049 #else
5050 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x28);
5051 #endif
5052 break;
5054 case Mfp_MSUBD: {
5055 UInt fr_dst = dregNo(i->Min.FpTernary.dst);
5056 UInt fr_src1 = dregNo(i->Min.FpTernary.src1);
5057 UInt fr_src2 = dregNo(i->Min.FpTernary.src2);
5058 UInt fr_src3 = dregNo(i->Min.FpTernary.src3);
5059 #if (__mips_isa_rev >= 6)
5060 p = mkFormR(p, 0x11, 0x11 , 0x0, fr_src1, fr_dst, 0x6);
5061 p = mkFormR(p, 0x11, 0x11, fr_src3, fr_src2, fr_dst, 0x19);
5062 #else
5063 p = mkFormR(p, 0x13, fr_src1, fr_src2, fr_src3, fr_dst, 0x29);
5064 #endif
5065 break;
5067 default:
5068 goto bad;
5070 goto done;
5073 case Min_FpConvert: {
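/* COP1 conversions: fmt field 0x10 = S, 0x11 = D, 0x14 = W, 0x15 = L. Functs: 0x20/0x21/0x24/0x25 = cvt.s/cvt.d/cvt.w/cvt.l, 0x0D/0x09 = trunc.w/trunc.l, 0x0E/0x0A = ceil.w/ceil.l, 0x0C/0x08 = round.w/round.l, 0x0F/0x0B = floor.w/floor.l, 0x1A = rint (r6 only). */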
5074 switch (i->Min.FpConvert.op) {
5075 UInt fr_dst, fr_src;
5076 case Mfp_CVTSD:
5077 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5078 fr_src = dregNo(i->Min.FpConvert.src);
5079 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x20);
5080 break;
5081 case Mfp_CVTSW:
5082 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5083 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5084 p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x20);
5085 break;
5086 case Mfp_CVTWD:
5087 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5088 fr_src = dregNo(i->Min.FpConvert.src);
5089 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x24);
5090 break;
5091 case Mfp_CVTWS:
5092 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5093 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5094 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x24);
5095 break;
5096 case Mfp_CVTDW:
5097 fr_dst = dregNo(i->Min.FpConvert.dst);
5098 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5099 p = mkFormR(p, 0x11, 0x14, 0, fr_src, fr_dst, 0x21);
5100 break;
5101 case Mfp_CVTDL:
5102 fr_dst = dregNo(i->Min.FpConvert.dst);
5103 fr_src = dregNo(i->Min.FpConvert.src);
5104 p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x21);
5105 break;
5106 case Mfp_CVTDS:
5107 fr_dst = dregNo(i->Min.FpConvert.dst);
5108 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5109 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x21);
5110 break;
5111 case Mfp_CVTSL:
5112 fr_dst = dregNo(i->Min.FpConvert.dst);
5113 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5114 p = mkFormR(p, 0x11, 0x15, 0, fr_src, fr_dst, 0x20);
5115 break;
5116 case Mfp_CVTLS:
5117 if (mode64) {
5118 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5119 fr_src = dregNo(i->Min.FpConvert.src);
5120 } else {
5121 fr_dst = dregNo(i->Min.FpConvert.dst);
5122 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5124 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x25);
5125 break;
5126 case Mfp_CVTLD:
5127 fr_dst = dregNo(i->Min.FpConvert.dst);
5128 fr_src = dregNo(i->Min.FpConvert.src);
5129 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x25);
5130 break;
5131 case Mfp_TRUWS:
5132 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5133 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5134 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0D);
5135 break;
5136 case Mfp_TRUWD:
5137 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5138 fr_src = dregNo(i->Min.FpConvert.src);
5139 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0D);
5140 break;
5141 case Mfp_TRULS:
5142 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5143 fr_src = dregNo(i->Min.FpConvert.src);
5144 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x09);
5145 break;
5146 case Mfp_TRULD:
5147 fr_dst = dregNo(i->Min.FpConvert.dst);
5148 fr_src = dregNo(i->Min.FpConvert.src);
5149 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x09);
5150 break;
5151 case Mfp_CEILWS:
5152 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5153 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5154 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0E);
5155 break;
5156 case Mfp_CEILWD:
5157 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5158 fr_src = dregNo(i->Min.FpConvert.src);
5159 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0E);
5160 break;
5161 case Mfp_CEILLS:
5162 fr_dst = dregNo(i->Min.FpConvert.dst);
5163 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5164 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0A);
5165 break;
5166 case Mfp_CEILLD:
5167 fr_dst = dregNo(i->Min.FpConvert.dst);
5168 fr_src = dregNo(i->Min.FpConvert.src);
5169 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0A);
5170 break;
5171 case Mfp_ROUNDWS:
5172 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5173 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5174 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0C);
5175 break;
5176 case Mfp_ROUNDWD:
5177 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5178 fr_src = dregNo(i->Min.FpConvert.src);
5179 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0C);
5180 break;
5181 case Mfp_ROUNDLD:
5182 fr_dst = dregNo(i->Min.FpConvert.dst);
5183 fr_src = dregNo(i->Min.FpConvert.src);
5184 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x08);
5185 break;
5186 case Mfp_FLOORWS:
5187 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5188 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5189 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x0F);
5190 break;
5191 case Mfp_FLOORWD:
5192 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5193 fr_src = dregNo(i->Min.FpConvert.src);
5194 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0F);
5195 break;
5196 case Mfp_FLOORLD:
5197 fr_dst = dregNo(i->Min.FpConvert.dst);
5198 fr_src = dregNo(i->Min.FpConvert.src);
5199 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x0B);
5200 break;
5201 case Mfp_RINTS:
5202 fr_dst = fregNo(i->Min.FpConvert.dst, mode64);
5203 fr_src = fregNo(i->Min.FpConvert.src, mode64);
5204 p = mkFormR(p, 0x11, 0x10, 0, fr_src, fr_dst, 0x1A);
5205 break;
5206 case Mfp_RINTD:
5207 fr_dst = dregNo(i->Min.FpConvert.dst);
5208 fr_src = dregNo(i->Min.FpConvert.src);
5209 p = mkFormR(p, 0x11, 0x11, 0, fr_src, fr_dst, 0x1A);
5210 break;
5212 default:
5213 goto bad;
5215 goto done;
5218 case Min_FpCompare: {
5219 #if (__mips_isa_rev >= 6)
5220 UInt fr_dst;
5221 UInt fr_srcL;
5222 UInt fr_srcR;
5224 UInt op;
5225 UInt format;
5226 switch (i->Min.FpCompare.op) {
5227 case Mfp_CMP_UN:
5228 fr_dst = dregNo(i->Min.FpCompare.dst);
5229 fr_srcL = dregNo(i->Min.FpCompare.srcL);
5230 fr_srcR = dregNo(i->Min.FpCompare.srcR);
5231 format=0x15;
5232 op = 1;
5233 break;
5234 case Mfp_CMP_EQ:
5235 fr_dst = dregNo(i->Min.FpCompare.dst);
5236 fr_srcL = dregNo(i->Min.FpCompare.srcL);
5237 fr_srcR = dregNo(i->Min.FpCompare.srcR);
5238 format=0x15;
5239 op = 2;
5240 break;
5241 case Mfp_CMP_LT:
5242 fr_dst = dregNo(i->Min.FpCompare.dst);
5243 fr_srcL = dregNo(i->Min.FpCompare.srcL);
5244 fr_srcR = dregNo(i->Min.FpCompare.srcR);
5245 format=0x15;
5246 op = 4;
5247 break;
5248 case Mfp_CMP_NGT:
5249 fr_dst = dregNo(i->Min.FpCompare.dst);
5250 fr_srcL = dregNo(i->Min.FpCompare.srcL);
5251 fr_srcR = dregNo(i->Min.FpCompare.srcR);
5252 format=0x15;
5253 op = 5;
5254 break;
5255 case Mfp_CMP_UN_S:
5256 fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
5257 fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
5258 fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
5259 format=0x14;
5260 op = 1;
5261 break;
5262 case Mfp_CMP_EQ_S:
5263 fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
5264 fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
5265 fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
5266 format=0x14;
5267 op = 2;
5268 break;
5269 case Mfp_CMP_LT_S:
5270 fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
5271 fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
5272 fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
5273 format=0x14;
5274 op = 4;
5275 break;
5276 case Mfp_CMP_NGT_S:
5277 fr_dst = fregNo(i->Min.FpCompare.dst, mode64);
5278 fr_srcL = fregNo(i->Min.FpCompare.srcL, mode64);
5279 fr_srcR = fregNo(i->Min.FpCompare.srcR, mode64);
5280 format=0x14;
5281 op = 5;
5282 break;
5283 default:
5284 goto bad;
5286 /* cmp.cond.d fr_srcL, fr_srcR */
5287 p = mkFormR(p, 0x11, format, fr_srcR, fr_srcL, fr_dst, op);
5288 #else
5289 UInt r_dst = iregNo(i->Min.FpCompare.dst, mode64);
5290 UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
5291 UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);
5293 UInt op;
5294 switch (i->Min.FpCompare.op) {
5295 case Mfp_CMP_UN:
5296 op = 1;
5297 break;
5298 case Mfp_CMP_EQ:
5299 op = 2;
5300 break;
5301 case Mfp_CMP_LT:
5302 op = 12;
5303 break;
5304 case Mfp_CMP_NGT:
5305 op = 15;
5306 break;
5307 default:
5308 goto bad;
5310 /* c.cond.d fr_srcL, fr_srcR
5311 cfc1 r_dst, $31
5312 srl r_dst, r_dst, 23
5313 andi r_dst, r_dst, 1 */
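/* Condition flag FCC0 sits in bit 23 of the FCSR, hence the shift right by 23 and the mask with 1. */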
5314 p = mkFormR(p, 0x11, 0x11, fr_srcL, fr_srcR, 0, op + 48);
5315 p = mkFormR(p, 0x11, 0x2, r_dst, 31, 0, 0);
5316 p = mkFormS(p, 0, r_dst, 0, r_dst, 23, 2);
5317 p = mkFormI(p, 12, r_dst, r_dst, 1);
5318 #endif
5319 goto done;
5322 #if (__mips_isa_rev >= 6)
5323 case Min_FpMinMax: {
5324 UInt r_dst = dregNo(i->Min.FpCompare.dst);
5325 UInt fr_srcL = dregNo(i->Min.FpCompare.srcL);
5326 UInt fr_srcR = dregNo(i->Min.FpCompare.srcR);
5327 UInt format;
5328 UInt instr;
5329 switch (i->Min.FpMinMax.op) {
5330 case Mfp_MAXS:
5331 format = 0x10;
5332 instr = 0x1E;
5333 break;
5334 case Mfp_MAXD:
5335 format = 0x11;
5336 instr = 0x1E;
5337 break;
5338 case Mfp_MINS:
5339 format = 0x10;
5340 instr = 0x1C;
5341 break;
5342 case Mfp_MIND:
5343 format = 0x11;
5344 instr = 0x1C;
5345 break;
5346 default:
5347 goto bad;
5349 p = mkFormR(p, 0x11, format, fr_srcR, fr_srcL, r_dst, instr);
5350 goto done;
5352 #endif
5355 case Min_FpGpMove: {
5356 switch (i->Min.FpGpMove.op) {
5357 UInt rt, fs;
5358 case MFpGpMove_mfc1: {
5359 rt = iregNo(i->Min.FpGpMove.dst, mode64);
5360 fs = fregNo(i->Min.FpGpMove.src, mode64);
5361 p = mkFormR(p, 0x11, 0x0, rt, fs, 0x0, 0x0);
5362 break;
5364 case MFpGpMove_dmfc1: {
5365 vassert(mode64);
5366 rt = iregNo(i->Min.FpGpMove.dst, mode64);
5367 fs = fregNo(i->Min.FpGpMove.src, mode64);
5368 p = mkFormR(p, 0x11, 0x1, rt, fs, 0x0, 0x0);
5369 break;
5371 case MFpGpMove_mtc1: {
5372 rt = iregNo(i->Min.FpGpMove.src, mode64);
5373 fs = fregNo(i->Min.FpGpMove.dst, mode64);
5374 p = mkFormR(p, 0x11, 0x4, rt, fs, 0x0, 0x0);
5375 break;
5377 case MFpGpMove_dmtc1: {
5378 vassert(mode64);
5379 rt = iregNo(i->Min.FpGpMove.src, mode64);
5380 fs = fregNo(i->Min.FpGpMove.dst, mode64);
5381 p = mkFormR(p, 0x11, 0x5, rt, fs, 0x0, 0x0);
5382 break;
5384 default:
5385 goto bad;
5387 goto done;
5390 case Min_MoveCond: {
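/* Encodings used below: movn.s/movn.d are COP1 funct 0x13; integer movn is SPECIAL funct 0xb; seleqz/selnez are SPECIAL functs 0x35/0x37 (r6); sel.s/sel.d are COP1 funct 0x10 (r6). */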
5391 switch (i->Min.MoveCond.op) {
5392 UInt d, s, t;
5393 case MFpMoveCond_movns: {
5394 d = fregNo(i->Min.MoveCond.dst, mode64);
5395 s = fregNo(i->Min.MoveCond.src, mode64);
5396 t = iregNo(i->Min.MoveCond.cond, mode64);
5397 p = mkFormR(p, 0x11, 0x10, t, s, d, 0x13);
5398 break;
5400 case MFpMoveCond_movnd: {
5401 d = dregNo(i->Min.MoveCond.dst);
5402 s = dregNo(i->Min.MoveCond.src);
5403 t = iregNo(i->Min.MoveCond.cond, mode64);
5404 p = mkFormR(p, 0x11, 0x11, t, s, d, 0x13);
5405 break;
5407 case MMoveCond_movn: {
5408 d = iregNo(i->Min.MoveCond.dst, mode64);
5409 s = iregNo(i->Min.MoveCond.src, mode64);
5410 t = iregNo(i->Min.MoveCond.cond, mode64);
5411 p = mkFormR(p, 0, s, t, d, 0, 0xb);
5413 break;
5415 case MSeleqz: {
5416 d = iregNo(i->Min.MoveCond.dst, mode64);
5417 s = iregNo(i->Min.MoveCond.src, mode64);
5418 t = iregNo(i->Min.MoveCond.cond, mode64);
5419 p = mkFormR(p, 0, s, t, d, 0, 0x35);
5420 break;
5422 case MSelnez: {
5423 d = iregNo(i->Min.MoveCond.dst, mode64);
5424 s = iregNo(i->Min.MoveCond.src, mode64);
5425 t = iregNo(i->Min.MoveCond.cond, mode64);
5426 p = mkFormR(p, 0, s, t, d, 0, 0x37);
5427 break;
5429 case MFpSels: {
5430 d = fregNo(i->Min.MoveCond.dst, mode64);
5431 s = fregNo(i->Min.MoveCond.src, mode64);
5432 t = fregNo(i->Min.MoveCond.cond, mode64);
5433 p = mkFormR(p, 0x11, 0x10, t, s, d, 0x10);
5434 break;
5436 case MFpSeld: {
5437 d = fregNo(i->Min.MoveCond.dst, mode64);
5438 s = fregNo(i->Min.MoveCond.src, mode64);
5439 t = fregNo(i->Min.MoveCond.cond, mode64);
5440 p = mkFormR(p, 0x11, 0x11, t, s, d, 0x10);
5441 break;
5443 default:
5444 goto bad;
5446 goto done;
5449 case Min_EvCheck: {
5450 /* This requires a 32-bit dec/test in 32 mode. */
5451 /* We generate:
5452 lw r9, amCounter
5453 addiu r9, r9, -1
5454 sw r9, amCounter
5455 bgez r9, nofail
5456 lw r9, amFailAddr
5457 jalr r9
nop
5459 nofail:
*/
5461 UChar* p0 = p;
5462 /* lw r9, amCounter */
5463 p = do_load_or_store_word32(p, True /*isLoad*/ , /*r*/ 9,
5464 i->Min.EvCheck.amCounter, mode64);
5465 /* addiu r9,r9,-1 */
5466 p = mkFormI(p, 9, 9, 9, 0xFFFF);
5467 /* sw r9, amCounter */
5468 p = do_load_or_store_word32(p, False /*!isLoad*/ , /*r*/ 9,
5469 i->Min.EvCheck.amCounter, mode64);
5470 /* bgez r9, nofail */
5471 p = mkFormI(p, 1, 9, 1, 3);
5472 /* lw/ld r9, amFailAddr */
5473 p = do_load_or_store_machine_word(p, True /*isLoad*/ , /*r*/ 9,
5474 i->Min.EvCheck.amFailAddr, mode64);
5475 /* jalr $9 */
5476 p = mkFormR(p, 0, 9, 0, 31, 0, 9); /* p += 4 */
5477 p = mkFormR(p, 0, 0, 0, 0, 0, 0); /* p += 4 */
5478 /* nofail: */
5480 /* Crosscheck */
5481 vassert(evCheckSzB_MIPS() == (UChar*)p - (UChar*)p0);
5482 goto done;
5485 case Min_ProfInc: {
5486 /* Generate a code template to increment a memory location whose
5487 address will be known later as an immediate value. This code
5488 template will be patched once the memory location is known.
5489 For now we do this with address == 0x65556555. */
5490 if (mode64) {
5491 /* 64-bit:
5492 move r9, 0x6555655565556555ULL
5493 ld r8, 0(r9)
5494 daddiu r8, r8, 1
5495 sd r8, 0(r9) */
5497 /* move r9, 0x6555655565556555ULL */
5498 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x6555655565556555ULL,
5499 True /*mode64*/);
5500 /* ld r8, 0(r9) */
5501 p = mkFormI(p, 55, 9, 8, 0);
5503 /* daddiu r8, r8, 1 */
5504 p = mkFormI(p, 25, 8, 8, 1);
5506 /* sd r8, 0(r9) */
5507 p = mkFormI(p, 63, 9, 8, 0);
5508 } else {
5509 /* 32-bit:
5510 move r9, 0x65556555
5511 lw r8, 0(r9)
5512 addiu r8, r8, 1 # add least significant word
5513 sw r8, 0(r9)
5514 sltiu r1, r8, 1 # set carry-in bit
5515 lw r8, 4(r9)
5516 addu r8, r8, r1
5517 sw r8, 4(r9) */
5519 /* move r9, 0x65556555 */
5520 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9, 0x65556555ULL,
5521 False /*!mode64*/);
5522 /* lw r8, 0(r9) */
5523 p = mkFormI(p, 35, 9, 8, 0);
5525 /* addiu r8, r8, 1 # add least significant word */
5526 p = mkFormI(p, 9, 8, 8, 1);
5528 /* sw r8, 0(r9) */
5529 p = mkFormI(p, 43, 9, 8, 0);
5531 /* sltiu r1, r8, 1 # set carry-in bit */
5532 p = mkFormI(p, 11, 8, 1, 1);
5534 /* lw r8, 4(r9) */
5535 p = mkFormI(p, 35, 9, 8, 4);
5537 /* addu r8, r8, r1 */
5538 p = mkFormR(p, 0, 8, 1, 8, 0, 33);
5540 /* sw r8, 4(r9) */
5541 p = mkFormI(p, 43, 9, 8, 4);
5544 /* Tell the caller .. */
5545 vassert(!(*is_profInc));
5546 *is_profInc = True;
5547 goto done;
5550 default:
5551 goto bad;
5555 bad:
5556 vex_printf("\n=> ");
5557 ppMIPSInstr(i, mode64);
5558 vpanic("emit_MIPSInstr");
5559 /* NOTREACHED */ done:
5560 vassert(p - &buf[0] <= 128);
5561 return p - &buf[0];
5564 /* How big is an event check? See case for Min_EvCheck in
5565 emit_MIPSInstr just above. That crosschecks what this returns, so
5566 we can tell if we're inconsistent. */
5567 Int evCheckSzB_MIPS (void)
5569 UInt kInstrSize = 4;
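/* The sequence above is 7 instructions (lw, addiu, sw, bgez, lw/ld, jalr, nop) of 4 bytes each, i.e. 28 bytes. */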
5570 return 7*kInstrSize;
5573 /* NB: what goes on here has to be very closely coordinated with the
5574 emitInstr case for XDirect, above. */
5575 VexInvalRange chainXDirect_MIPS ( VexEndness endness_host,
5576 void* place_to_chain,
5577 const void* disp_cp_chain_me_EXPECTED,
5578 const void* place_to_jump_to,
5579 Bool mode64 )
5581 vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
5582 /* What we're expecting to see is:
5583 move r9, disp_cp_chain_me_to_EXPECTED
5584 jalr r9
5587 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
5588 0x120F809 # jalr r9
5589 0x00000000 # nop
*/
5591 UChar* p = (UChar*)place_to_chain;
5592 vassert(0 == (3 & (HWord)p));
5593 vassert(isLoadImm_EXACTLY2or6(p, /*r*/9,
5594 (UInt)(Addr)disp_cp_chain_me_EXPECTED,
5595 mode64));
5596 vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
5597 vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
5598 /* And what we want to change it to is either:
5599 move r9, place_to_jump_to
5600 jalr r9
5603 <8 bytes generated by mkLoadImm_EXACTLY2or6>
5604 0x120F809 # jalr r9
5605 0x00000000 # nop
5607 The replacement has the same length as the original.
*/
5610 p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
5611 (Addr)place_to_jump_to, mode64);
5612 p = emit32(p, 0x120F809);
5613 p = emit32(p, 0x00000000);
5615 Int len = p - (UChar*)place_to_chain;
5616 vassert(len == (mode64 ? 32 : 16)); /* stay sane */
5617 VexInvalRange vir = {(HWord)place_to_chain, len};
5618 return vir;
5621 /* NB: what goes on here has to be very closely coordinated with the
5622 emitInstr case for XDirect, above. */
5623 VexInvalRange unchainXDirect_MIPS ( VexEndness endness_host,
5624 void* place_to_unchain,
5625 const void* place_to_jump_to_EXPECTED,
5626 const void* disp_cp_chain_me,
5627 Bool mode64 )
5629 vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
5630 /* What we're expecting to see is:
5631 move r9, place_to_jump_to_EXPECTED
5632 jalr r9
5635 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
5636 0x120F809 # jalr r9
5637 0x00000000 # nop
*/
5639 UChar* p = (UChar*)place_to_unchain;
5640 vassert(0 == (3 & (HWord)p));
5641 vassert(isLoadImm_EXACTLY2or6(p, /*r*/ 9,
5642 (Addr)place_to_jump_to_EXPECTED,
5643 mode64));
5644 vassert(fetch32(p + (mode64 ? 24 : 8) + 0) == 0x120F809);
5645 vassert(fetch32(p + (mode64 ? 24 : 8) + 4) == 0x00000000);
5646 /* And what we want to change it to is:
5647 move r9, disp_cp_chain_me
5648 jalr r9
5651 <8 or 24 bytes generated by mkLoadImm_EXACTLY2or6>
5652 0x120F809 # jalr r9
5653 0x00000000 # nop
5654 The replacement has the same length as the original.
*/
5656 p = mkLoadImm_EXACTLY2or6(p, /*r*/ 9,
5657 (Addr)disp_cp_chain_me, mode64);
5658 p = emit32(p, 0x120F809);
5659 p = emit32(p, 0x00000000);
5661 Int len = p - (UChar*)place_to_unchain;
5662 vassert(len == (mode64 ? 32 : 16)); /* stay sane */
5663 VexInvalRange vir = {(HWord)place_to_unchain, len};
5664 return vir;
5667 /* Patch the counter address into a profile inc point, as previously
5668 created by the Min_ProfInc case for emit_MIPSInstr. */
5669 VexInvalRange patchProfInc_MIPS ( VexEndness endness_host,
5670 void* place_to_patch,
5671 const ULong* location_of_counter,
5672 Bool mode64 )
5674 vassert(endness_host == VexEndnessLE || endness_host == VexEndnessBE);
5675 if (mode64) {
5676 vassert(sizeof(ULong*) == 8);
5677 } else {
5678 vassert(sizeof(ULong*) == 4);
5680 UChar* p = (UChar*)place_to_patch;
5681 vassert(0 == (3 & (HWord)p));
5682 vassert(isLoadImm_EXACTLY2or6((UChar *)p, /*r*/9,
5683 mode64 ? 0x6555655565556555ULL : 0x65556555,
5684 mode64));
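/* The raw words checked below are the fixed encodings emitted by Min_ProfInc above: 0xDD280000 = ld $8,0($9), 0x65080001 = daddiu $8,$8,1, 0xFD280000 = sd $8,0($9) for 64-bit; 0x8D280000 = lw $8,0($9), 0x25080001 = addiu $8,$8,1, 0xAD280000 = sw $8,0($9), 0x2D010001 = sltiu $1,$8,1, 0x8D280004 = lw $8,4($9), 0x01014021 = addu $8,$8,$1, 0xAD280004 = sw $8,4($9) for 32-bit. */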
5686 if (mode64) {
5687 vassert(fetch32(p + 24 + 0) == 0xDD280000);
5688 vassert(fetch32(p + 24 + 4) == 0x65080001);
5689 vassert(fetch32(p + 24 + 8) == 0xFD280000);
5690 } else {
5691 vassert(fetch32(p + 8 + 0) == 0x8D280000);
5692 vassert(fetch32(p + 8 + 4) == 0x25080001);
5693 vassert(fetch32(p + 8 + 8) == 0xAD280000);
5694 vassert(fetch32(p + 8 + 12) == 0x2d010001);
5695 vassert(fetch32(p + 8 + 16) == 0x8d280004);
5696 vassert(fetch32(p + 8 + 20) == 0x01014021);
5697 vassert(fetch32(p + 8 + 24) == 0xad280004);
5700 p = mkLoadImm_EXACTLY2or6(p, /*r*/9,
5701 (Addr)location_of_counter, mode64);
5703 VexInvalRange vir = {(HWord)p, 8};
5704 return vir;
5708 /*---------------------------------------------------------------*/
5709 /*--- end host_mips_defs.c ---*/
5710 /*---------------------------------------------------------------*/