//===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsLegalizerInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/IR/IntrinsicsMips.h"

using namespace llvm;
struct TypesAndMemOps {
  LLT ValTy;
  LLT PtrTy;
  unsigned MemSize;
  bool SystemSupportsUnalignedAccess;
};
// Assumes power of 2 memory size. Subtargets that have only naturally-aligned
// memory access need to perform additional legalization here.
static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
  assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size");
  assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align");
  if (MemSize > AlignInBits)
    return true;
  return false;
}
static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();

  // Non power of two memory access is never legal.
  if (!isPowerOf2_64(QueryMemSize))
    return false;

  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      return false;
    return true;
  }
  return false;
}
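
// Return true if the type at index N in Query is one of SupportedValues.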
static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}
MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);
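
  // Integer arithmetic: s32 is natively supported and other scalar sizes are
  // clamped to s32; 128-bit MSA vectors are legal when MSA is available.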
  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);
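
  // Unsigned add/sub/mul with carry/overflow outputs have no native MIPS
  // instructions; lower them into plain s32 arithmetic plus an s1 flag
  // computation.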
  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);
  // MIPS32r6 does not have alignment restrictions for memory access.
  // For MIPS32r5 and older, memory access must be naturally aligned, i.e.
  // aligned to at least its own size. There is however a two-instruction
  // combination that performs a 4 byte unaligned access (lwr/lwl and swl/swr),
  // so 4 byte load and store are legal and will use NoAlignRequirements.
  bool NoAlignRequirements = true;
  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits()))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32)
      .lower();
  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});
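
  // An s64 value is handled as two s32 halves: unmerging an s64 into two s32
  // pieces and merging two s32 pieces back into an s64 are both legal.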
  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
      .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
      .legalFor({{s64, s32}});
  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);
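
  // Extension and truncation are never legal as standalone instructions;
  // scalars wider than s32 are narrowed, and the remaining casts are expected
  // to be folded away as legalization artifacts.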
  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);
  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);
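
  // Integer division and remainder are legal for s32 (and for MSA vectors
  // when available); s64 variants are lowered to runtime library calls.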
  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});
  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);
  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);
  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);
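
  // Pointers are 32 bits wide, so pointer arithmetic and integer<->pointer
  // casts pair p0 with s32.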
  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
      .legalFor({p0});
  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);
  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);
  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});
  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });
  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});
  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);
  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);
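
  // The custom action for G_UITOFP with an s32 source is implemented in
  // legalizeCustom() below, using the 2^52 bit-pattern trick described there.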
  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}
bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");

    // Split MemSize into two, P2HalfMemSize is largest power of two smaller
    // than MemSize. e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }

    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);
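
    // Stores are split in two: the power-of-2 low part goes to BaseAddr, the
    // remainder to BaseAddr + P2HalfMemSize, obtained either by shifting the
    // value right (MemSize <= 4) or by unmerging it into two s32 halves.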
    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }
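
    // Loads mirror the store split. For MemSize <= 4 the result is an
    // anyextending load, so a single 4 byte lwr/lwl access suffices; larger
    // sizes are rebuilt from two s32 partial loads.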
    if (MI.getOpcode() == G_LOAD) {
      if (MemSize <= 4) {
        // This is anyextending load, use 4 byte lwr/lwl.
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }
      } else {
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMerge(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge = MIRBuilder.buildMerge(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Let 0xABCDEFGH be given unsigned in MI.getOperand(1). First let's
    // convert unsigned to double. Mantissa has 52 bits so we use the
    // following trick: First make floating point bit mask 0x43300000ABCDEFGH.
    // Mask represents 2^52 * 0x1.00000ABCDEFGH i.e. 0x100000ABCDEFGH.0 .
    // Next, subtract 2^52 * 0x1.0000000000000 i.e. 0x10000000000000.0 from it.
    // Done. Trunc double to float if needed.

    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
    auto Bitcast = MIRBuilder.buildMerge(s64, {Src, C_HiMask.getReg(0)});

    MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
        s64, BitsToDouble(UINT64_C(0x4330000000000000)));

    if (DstTy == s64)
      MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
    else {
      MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
      MIRBuilder.buildFPTrunc(Dst, ResF64);
    }

    MI.eraseFromParent();
    break;
  }
  default:
    return false;
  }

  return true;
}
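
// SelectMSA3OpIntrinsic builds a concrete MSA machine instruction and
// constrains its register operands immediately, while the
// MSA*OpIntrinsicToGeneric helpers rewrite the intrinsic into a generic
// opcode that goes through regular legalization and instruction selection.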
static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  if (!MIRBuilder.buildInstr(Opcode)
           .add(MI.getOperand(0))
           .add(MI.getOperand(2))
           .add(MI.getOperand(3))
           .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
                             *ST.getRegBankInfo()))
    return false;
  MI.eraseFromParent();
  return true;
}
static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}
static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}
bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST =
      static_cast<const MipsSubtarget &>(MI.getMF()->getSubtarget());
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (MI.getIntrinsicID()) {
  case Intrinsic::trap: {
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy,
                                 Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy,
                              Align(4)));
    MI.eraseFromParent();
    return true;
  }
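  // MSA vector intrinsics. Most map directly onto a generic opcode; the
  // immediate-operand forms (addvi/subvi) and fmax_a are selected to MSA
  // machine instructions right away.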
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);