//===- MipsLegalizerInfo.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the MachineLegalizer class for Mips.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "MipsLegalizerInfo.h"
#include "MipsTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
#include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/IR/IntrinsicsMips.h"

using namespace llvm;

struct TypesAndMemOps {
  LLT ValTy;
  LLT PtrTy;
  unsigned MemSize;
  bool SystemSupportsUnalignedAccess;
};

// Assumes power of 2 memory size. Subtargets that have only naturally-aligned
// memory access need to perform additional legalization here.
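// For example (sizes and alignments are both in bits here): MemSize 64 with
// AlignInBits 32 is an unaligned access, while MemSize 32 with AlignInBits 32
// or larger is naturally aligned.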
static bool isUnalignedMemmoryAccess(uint64_t MemSize, uint64_t AlignInBits) {
  assert(isPowerOf2_64(MemSize) && "Expected power of 2 memory size");
  assert(isPowerOf2_64(AlignInBits) && "Expected power of 2 align");
  if (MemSize > AlignInBits)
    return true;
  return false;
}

static bool
CheckTy0Ty1MemSizeAlign(const LegalityQuery &Query,
                        std::initializer_list<TypesAndMemOps> SupportedValues) {
  unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();

  // Non power of two memory access is never legal.
  if (!isPowerOf2_64(QueryMemSize))
    return false;

  for (auto &Val : SupportedValues) {
    if (Val.ValTy != Query.Types[0])
      continue;
    if (Val.PtrTy != Query.Types[1])
      continue;
    if (Val.MemSize != QueryMemSize)
      continue;
    if (!Val.SystemSupportsUnalignedAccess &&
        isUnalignedMemmoryAccess(QueryMemSize, Query.MMODescrs[0].AlignInBits))
      return false;
    return true;
  }
  return false;
}

static bool CheckTyN(unsigned N, const LegalityQuery &Query,
                     std::initializer_list<LLT> SupportedValues) {
  return llvm::is_contained(SupportedValues, Query.Types[N]);
}

MipsLegalizerInfo::MipsLegalizerInfo(const MipsSubtarget &ST) {
  using namespace TargetOpcode;

  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);
  const LLT v16s8 = LLT::fixed_vector(16, 8);
  const LLT v8s16 = LLT::fixed_vector(8, 16);
  const LLT v4s32 = LLT::fixed_vector(4, 32);
  const LLT v2s64 = LLT::fixed_vector(2, 64);
  const LLT p0 = LLT::pointer(0, 32);

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_MUL})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_UADDO, G_UADDE, G_USUBO, G_USUBE, G_UMULO})
      .lowerFor({{s32, s1}});

  getActionDefinitionsBuilder(G_UMULH)
      .legalFor({s32})
      .maxScalar(0, s32);

  // MIPS32r6 does not have alignment restrictions for memory access.
  // For MIPS32r5 and older, memory access must be naturally aligned, i.e. the
  // address must be a multiple of the access size. There is, however, a two
  // instruction combination that performs a 4 byte unaligned access
  // (lwr/lwl and swl/swr), therefore 4 byte loads and stores are legal and
  // will use NoAlignRequirements.
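  // For example: on MIPS32r5 an s32 load from a 1 byte aligned address is
  // still legal (it is later selected to lwl/lwr), while 2 byte or 8 byte
  // accesses with less than natural alignment are custom lowered below.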
  bool NoAlignRequirements = true;

  getActionDefinitionsBuilder({G_LOAD, G_STORE})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTy0Ty1MemSizeAlign(
                Query, {{s32, p0, 8, NoAlignRequirements},
                        {s32, p0, 16, ST.systemSupportsUnalignedAccess()},
                        {s32, p0, 32, NoAlignRequirements},
                        {p0, p0, 32, NoAlignRequirements},
                        {s64, p0, 64, ST.systemSupportsUnalignedAccess()}}))
          return true;
        if (ST.hasMSA() && CheckTy0Ty1MemSizeAlign(
                               Query, {{v16s8, p0, 128, NoAlignRequirements},
                                       {v8s16, p0, 128, NoAlignRequirements},
                                       {v4s32, p0, 128, NoAlignRequirements},
                                       {v2s64, p0, 128, NoAlignRequirements}}))
          return true;
        return false;
      })
      // Custom lower scalar memory access, up to 8 bytes, for:
      // - non-power-of-2 MemSizes
      // - unaligned 2 or 8 byte MemSizes for MIPS32r5 and older
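      // For example, an s24 (3 byte) load always takes this path, and on
      // MIPS32r5 and older so does a 2 byte store aligned to 1 byte; a 4 byte
      // access aligned to 1 byte stays on the legal path above.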
      .customIf([=, &ST](const LegalityQuery &Query) {
        if (!Query.Types[0].isScalar() || Query.Types[1] != p0 ||
            Query.Types[0] == s1)
          return false;

        unsigned Size = Query.Types[0].getSizeInBits();
        unsigned QueryMemSize = Query.MMODescrs[0].MemoryTy.getSizeInBits();
        assert(QueryMemSize <= Size && "Scalar can't hold MemSize");

        if (Size > 64 || QueryMemSize > 64)
          return false;

        if (!isPowerOf2_64(Query.MMODescrs[0].MemoryTy.getSizeInBits()))
          return true;

        if (!ST.systemSupportsUnalignedAccess() &&
            isUnalignedMemmoryAccess(QueryMemSize,
                                     Query.MMODescrs[0].AlignInBits)) {
          assert(QueryMemSize != 32 && "4 byte load and store are legal");
          return true;
        }

        return false;
      })
      .minScalar(0, s32)
      .lower();

  getActionDefinitionsBuilder(G_IMPLICIT_DEF)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder(G_UNMERGE_VALUES)
      .legalFor({{s32, s64}});

  getActionDefinitionsBuilder(G_MERGE_VALUES)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder({G_ZEXTLOAD, G_SEXTLOAD})
      .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                 {s32, p0, s16, 8}})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_TRUNC)
      .legalIf([](const LegalityQuery &Query) { return false; })
      .maxScalar(1, s32);

  getActionDefinitionsBuilder(G_SELECT)
      .legalForCartesianProduct({p0, s32, s64}, {s32})
      .minScalar(0, s32)
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_BRCOND)
      .legalFor({s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_BRJT)
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_BRINDIRECT)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, s32, s64})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_AND, G_OR, G_XOR})
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_SDIV, G_SREM, G_UDIV, G_UREM})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      })
      .minScalar(0, s32)
      .libcallFor({s64});

  getActionDefinitionsBuilder({G_SHL, G_ASHR, G_LSHR})
      .legalFor({{s32, s32}})
      .clampScalar(1, s32, s32)
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder(G_ICMP)
      .legalForCartesianProduct({s32}, {s32, p0})
      .clampScalar(1, s32, s32)
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_CONSTANT)
      .legalFor({s32})
      .clampScalar(0, s32, s32);

  getActionDefinitionsBuilder({G_PTR_ADD, G_INTTOPTR})
      .legalFor({{p0, s32}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{s32, p0}});

  getActionDefinitionsBuilder(G_FRAME_INDEX)
      .legalFor({p0});

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE})
      .legalFor({p0});

  getActionDefinitionsBuilder(G_DYN_STACKALLOC)
      .lowerFor({{p0, s32}});

  getActionDefinitionsBuilder(G_VASTART)
      .legalFor({p0});

  getActionDefinitionsBuilder(G_BSWAP)
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .lowerIf([=, &ST](const LegalityQuery &Query) {
        if (!ST.hasMips32r2() && CheckTyN(0, Query, {s32}))
          return true;
        return false;
      })
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_BITREVERSE)
      .lowerFor({s32})
      .maxScalar(0, s32);

  getActionDefinitionsBuilder(G_CTLZ)
      .legalFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTLZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}});

  getActionDefinitionsBuilder(G_CTTZ)
      .lowerFor({{s32, s32}})
      .maxScalar(0, s32)
      .maxScalar(1, s32);
  getActionDefinitionsBuilder(G_CTTZ_ZERO_UNDEF)
      .lowerFor({{s32, s32}, {s64, s64}});

  getActionDefinitionsBuilder(G_CTPOP)
      .lowerFor({{s32, s32}})
      .clampScalar(0, s32, s32)
      .clampScalar(1, s32, s32);

  // FP instructions
  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalFor({s32, s64});

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FABS, G_FSQRT})
      .legalIf([=, &ST](const LegalityQuery &Query) {
        if (CheckTyN(0, Query, {s32, s64}))
          return true;
        if (ST.hasMSA() && CheckTyN(0, Query, {v16s8, v8s16, v4s32, v2s64}))
          return true;
        return false;
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalFor({{s32, s32}, {s32, s64}})
      .minScalar(0, s32);

  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_FPEXT)
      .legalFor({{s64, s32}});

  getActionDefinitionsBuilder(G_FPTRUNC)
      .legalFor({{s32, s64}});

  // FP to int conversion instructions
  getActionDefinitionsBuilder(G_FPTOSI)
      .legalForCartesianProduct({s32}, {s64, s32})
      .libcallForCartesianProduct({s64}, {s64, s32})
      .minScalar(0, s32);

  getActionDefinitionsBuilder(G_FPTOUI)
      .libcallForCartesianProduct({s64}, {s64, s32})
      .lowerForCartesianProduct({s32}, {s64, s32})
      .minScalar(0, s32);

  // Int to FP conversion instructions
  getActionDefinitionsBuilder(G_SITOFP)
      .legalForCartesianProduct({s64, s32}, {s32})
      .libcallForCartesianProduct({s64, s32}, {s64})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_UITOFP)
      .libcallForCartesianProduct({s64, s32}, {s64})
      .customForCartesianProduct({s64, s32}, {s32})
      .minScalar(1, s32);

  getActionDefinitionsBuilder(G_SEXT_INREG).lower();

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getLegacyLegalizerInfo().computeTables();
  verify(*ST.getInstrInfo());
}

bool MipsLegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
                                       MachineInstr &MI) const {
  using namespace TargetOpcode;

  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  MachineRegisterInfo &MRI = *MIRBuilder.getMRI();

  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  switch (MI.getOpcode()) {
  case G_LOAD:
  case G_STORE: {
    unsigned MemSize = (**MI.memoperands_begin()).getSize();
    Register Val = MI.getOperand(0).getReg();
    unsigned Size = MRI.getType(Val).getSizeInBits();

    MachineMemOperand *MMOBase = *MI.memoperands_begin();

    assert(MemSize <= 8 && "MemSize is too large");
    assert(Size <= 64 && "Scalar size is too large");

    // Split MemSize into two, P2HalfMemSize is largest power of two smaller
    // than MemSize. e.g. 8 = 4 + 4, 6 = 4 + 2, 3 = 2 + 1.
    unsigned P2HalfMemSize, RemMemSize;
    if (isPowerOf2_64(MemSize)) {
      P2HalfMemSize = RemMemSize = MemSize / 2;
    } else {
      P2HalfMemSize = 1 << Log2_32(MemSize);
      RemMemSize = MemSize - P2HalfMemSize;
    }

    Register BaseAddr = MI.getOperand(1).getReg();
    LLT PtrTy = MRI.getType(BaseAddr);
    MachineFunction &MF = MIRBuilder.getMF();

    auto P2HalfMemOp = MF.getMachineMemOperand(MMOBase, 0, P2HalfMemSize);
    auto RemMemOp = MF.getMachineMemOperand(MMOBase, P2HalfMemSize, RemMemSize);

    if (MI.getOpcode() == G_STORE) {
      // Widen Val to s32 or s64 in order to create legal G_LSHR or G_UNMERGE.
      if (Size < 32)
        Val = MIRBuilder.buildAnyExt(s32, Val).getReg(0);
      if (Size > 32 && Size < 64)
        Val = MIRBuilder.buildAnyExt(s64, Val).getReg(0);

      auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
      auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

      if (MI.getOpcode() == G_STORE && MemSize <= 4) {
        MIRBuilder.buildStore(Val, BaseAddr, *P2HalfMemOp);
        auto C_P2Half_InBits = MIRBuilder.buildConstant(s32, P2HalfMemSize * 8);
        auto Shift = MIRBuilder.buildLShr(s32, Val, C_P2Half_InBits);
        MIRBuilder.buildStore(Shift, Addr, *RemMemOp);
      } else {
        auto Unmerge = MIRBuilder.buildUnmerge(s32, Val);
        MIRBuilder.buildStore(Unmerge.getReg(0), BaseAddr, *P2HalfMemOp);
        MIRBuilder.buildStore(Unmerge.getReg(1), Addr, *RemMemOp);
      }
    }

    if (MI.getOpcode() == G_LOAD) {

      if (MemSize <= 4) {
        // This is an anyextending load; use 4 byte lwr/lwl.
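        // For example (illustrative): an s24 load with a 3 byte MemSize
        // becomes this single 4 byte load followed by the G_TRUNC below.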
        auto *Load4MMO = MF.getMachineMemOperand(MMOBase, 0, 4);

        if (Size == 32)
          MIRBuilder.buildLoad(Val, BaseAddr, *Load4MMO);
        else {
          auto Load = MIRBuilder.buildLoad(s32, BaseAddr, *Load4MMO);
          MIRBuilder.buildTrunc(Val, Load.getReg(0));
        }

      } else {
        auto C_P2HalfMemSize = MIRBuilder.buildConstant(s32, P2HalfMemSize);
        auto Addr = MIRBuilder.buildPtrAdd(PtrTy, BaseAddr, C_P2HalfMemSize);

        auto Load_P2Half = MIRBuilder.buildLoad(s32, BaseAddr, *P2HalfMemOp);
        auto Load_Rem = MIRBuilder.buildLoad(s32, Addr, *RemMemOp);

        if (Size == 64)
          MIRBuilder.buildMergeLikeInstr(Val, {Load_P2Half, Load_Rem});
        else {
          auto Merge =
              MIRBuilder.buildMergeLikeInstr(s64, {Load_P2Half, Load_Rem});
          MIRBuilder.buildTrunc(Val, Merge);
        }
      }
    }
    MI.eraseFromParent();
    break;
  }
  case G_UITOFP: {
    Register Dst = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    LLT DstTy = MRI.getType(Dst);
    LLT SrcTy = MRI.getType(Src);

    if (SrcTy != s32)
      return false;
    if (DstTy != s32 && DstTy != s64)
      return false;

    // Let 0xABCDEFGH be the given unsigned value in MI.getOperand(1). First we
    // convert unsigned to double. The mantissa has 52 bits, so we use the
    // following trick: build the floating point bit pattern 0x43300000ABCDEFGH,
    // which represents 2^52 * 0x1.00000ABCDEFGH, i.e. 0x100000ABCDEFGH.0.
    // Next, subtract 2^52 * 0x1.0000000000000, i.e. 0x10000000000000.0, from it.
    // Done. Trunc double to float if needed.
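    // Worked example (illustrative): for Src = 0x80000000, i.e. 2^31, the
    // bit pattern 0x4330000080000000 is the double 2^52 + 2^31; subtracting
    // 2^52 (bit pattern 0x4330000000000000) yields exactly 2147483648.0.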
    auto C_HiMask = MIRBuilder.buildConstant(s32, UINT32_C(0x43300000));
    auto Bitcast =
        MIRBuilder.buildMergeLikeInstr(s64, {Src, C_HiMask.getReg(0)});

    MachineInstrBuilder TwoP52FP = MIRBuilder.buildFConstant(
        s64, llvm::bit_cast<double>(UINT64_C(0x4330000000000000)));

    if (DstTy == s64)
      MIRBuilder.buildFSub(Dst, Bitcast, TwoP52FP);
    else {
      MachineInstrBuilder ResF64 = MIRBuilder.buildFSub(s64, Bitcast, TwoP52FP);
      MIRBuilder.buildFPTrunc(Dst, ResF64);
    }

    MI.eraseFromParent();
    break;
  }
  default:
    return false;
  }

  return true;
}

static bool SelectMSA3OpIntrinsic(MachineInstr &MI, unsigned Opcode,
                                  MachineIRBuilder &MIRBuilder,
                                  const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  if (!MIRBuilder.buildInstr(Opcode)
           .add(MI.getOperand(0))
           .add(MI.getOperand(2))
           .add(MI.getOperand(3))
           .constrainAllUses(MIRBuilder.getTII(), *ST.getRegisterInfo(),
                             *ST.getRegBankInfo()))
    return false;
  MI.eraseFromParent();
  return true;
}

static bool MSA3OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2))
      .add(MI.getOperand(3));
  MI.eraseFromParent();
  return true;
}

static bool MSA2OpIntrinsicToGeneric(MachineInstr &MI, unsigned Opcode,
                                     MachineIRBuilder &MIRBuilder,
                                     const MipsSubtarget &ST) {
  assert(ST.hasMSA() && "MSA intrinsic not supported on target without MSA.");
  MIRBuilder.buildInstr(Opcode)
      .add(MI.getOperand(0))
      .add(MI.getOperand(2));
  MI.eraseFromParent();
  return true;
}

bool MipsLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
                                          MachineInstr &MI) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  const MipsSubtarget &ST = MI.getMF()->getSubtarget<MipsSubtarget>();
  const MipsInstrInfo &TII = *ST.getInstrInfo();
  const MipsRegisterInfo &TRI = *ST.getRegisterInfo();
  const RegisterBankInfo &RBI = *ST.getRegBankInfo();

  switch (cast<GIntrinsic>(MI).getIntrinsicID()) {
  case Intrinsic::trap: {
    MachineInstr *Trap = MIRBuilder.buildInstr(Mips::TRAP);
    MI.eraseFromParent();
    return constrainSelectedInstRegOperands(*Trap, TII, TRI, RBI);
  }
  case Intrinsic::vacopy: {
    MachinePointerInfo MPO;
    LLT PtrTy = LLT::pointer(0, 32);
    auto Tmp =
        MIRBuilder.buildLoad(PtrTy, MI.getOperand(2),
                             *MI.getMF()->getMachineMemOperand(
                                 MPO, MachineMemOperand::MOLoad, PtrTy, Align(4)));
    MIRBuilder.buildStore(Tmp, MI.getOperand(1),
                          *MI.getMF()->getMachineMemOperand(
                              MPO, MachineMemOperand::MOStore, PtrTy, Align(4)));
    MI.eraseFromParent();
    return true;
  }
  case Intrinsic::mips_addv_b:
  case Intrinsic::mips_addv_h:
  case Intrinsic::mips_addv_w:
  case Intrinsic::mips_addv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_ADD, MIRBuilder, ST);
  case Intrinsic::mips_addvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_B, MIRBuilder, ST);
  case Intrinsic::mips_addvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_H, MIRBuilder, ST);
  case Intrinsic::mips_addvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_W, MIRBuilder, ST);
  case Intrinsic::mips_addvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::ADDVI_D, MIRBuilder, ST);
  case Intrinsic::mips_subv_b:
  case Intrinsic::mips_subv_h:
  case Intrinsic::mips_subv_w:
  case Intrinsic::mips_subv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SUB, MIRBuilder, ST);
  case Intrinsic::mips_subvi_b:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_B, MIRBuilder, ST);
  case Intrinsic::mips_subvi_h:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_H, MIRBuilder, ST);
  case Intrinsic::mips_subvi_w:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_W, MIRBuilder, ST);
  case Intrinsic::mips_subvi_d:
    return SelectMSA3OpIntrinsic(MI, Mips::SUBVI_D, MIRBuilder, ST);
  case Intrinsic::mips_mulv_b:
  case Intrinsic::mips_mulv_h:
  case Intrinsic::mips_mulv_w:
  case Intrinsic::mips_mulv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_MUL, MIRBuilder, ST);
  case Intrinsic::mips_div_s_b:
  case Intrinsic::mips_div_s_h:
  case Intrinsic::mips_div_s_w:
  case Intrinsic::mips_div_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_s_b:
  case Intrinsic::mips_mod_s_h:
  case Intrinsic::mips_mod_s_w:
  case Intrinsic::mips_mod_s_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_SREM, MIRBuilder, ST);
  case Intrinsic::mips_div_u_b:
  case Intrinsic::mips_div_u_h:
  case Intrinsic::mips_div_u_w:
  case Intrinsic::mips_div_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UDIV, MIRBuilder, ST);
  case Intrinsic::mips_mod_u_b:
  case Intrinsic::mips_mod_u_h:
  case Intrinsic::mips_mod_u_w:
  case Intrinsic::mips_mod_u_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_UREM, MIRBuilder, ST);
  case Intrinsic::mips_fadd_w:
  case Intrinsic::mips_fadd_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FADD, MIRBuilder, ST);
  case Intrinsic::mips_fsub_w:
  case Intrinsic::mips_fsub_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FSUB, MIRBuilder, ST);
  case Intrinsic::mips_fmul_w:
  case Intrinsic::mips_fmul_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FMUL, MIRBuilder, ST);
  case Intrinsic::mips_fdiv_w:
  case Intrinsic::mips_fdiv_d:
    return MSA3OpIntrinsicToGeneric(MI, TargetOpcode::G_FDIV, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_w:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_W, MIRBuilder, ST);
  case Intrinsic::mips_fmax_a_d:
    return SelectMSA3OpIntrinsic(MI, Mips::FMAX_A_D, MIRBuilder, ST);
  case Intrinsic::mips_fsqrt_w:
  case Intrinsic::mips_fsqrt_d:
    return MSA2OpIntrinsicToGeneric(MI, TargetOpcode::G_FSQRT, MIRBuilder, ST);
  default:
    break;
  }
  return true;
}