//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
  State.MBB = &MBB;
  State.II = MBB.end();
  assert(&getMF() == MBB.getParent() &&
         "Basic block is in a different function");
}

void MachineIRBuilder::setInstr(MachineInstr &MI) {
  assert(MI.getParent() && "Instruction is not part of a basic block");
  setMBB(*MI.getParent());
  State.II = MI.getIterator();
}
void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }

void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator II) {
  assert(MBB.getParent() == &getMF() &&
         "Basic block is in a different function");
  State.MBB = &MBB;
  State.II = II;
}
void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
  if (State.Observer)
    State.Observer->createdInstr(*InsertedInstr);
}

void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
  State.Observer = &Observer;
}

void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
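
// Usage sketch (illustrative, not part of this file): clients point the
// builder at a function and block before emitting anything, e.g.
//   MachineIRBuilder MIRBuilder;
//   MIRBuilder.setMF(MF);
//   MIRBuilder.setMBB(EntryMBB);
//   MIRBuilder.buildInstr(TargetOpcode::G_ADD, {DstReg}, {LHS, RHS});
// MF, EntryMBB, DstReg, LHS and RHS are assumed placeholder names.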
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
  return insertInstr(buildInstrNoInsert(Opcode));
}

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
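
// Note: splitting creation (buildInstrNoInsert) from placement (insertInstr)
// lets subclasses such as the CSE-aware builder examine an instruction before
// it is inserted and before the change observer is notified.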
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  return MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
}
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}
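
// Example sketch for the debug builders above: given a DILocalVariable *Var
// and a DIExpression *Expr valid at the current debug location, a
// register-valued location is emitted with
//   MIRBuilder.buildDirectDbgValue(ValueReg, Var, Expr);
// Var, Expr and ValueReg are assumed placeholder names.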
MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         unsigned Align) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Align);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
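
// G_JUMP_TABLE only materializes the address of jump table JTI as a pointer
// of type PtrTy; the actual indexed dispatch is emitted separately as a
// G_BRJT (see buildBrJT below).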
void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
                                       const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
                                               const SrcOp &Op0,
                                               const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");

  return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
                                 const LLT &ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildGEP(Res, Op0, Cst.getReg(0));
}
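
// Note the asymmetry with buildGEP: a zero offset emits no instruction at all
// (Res is simply rebound to Op0 and None is returned), so callers must check
// the returned Optional before treating the result as an instruction.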
MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,
                                                   const SrcOp &Op0,
                                                   uint32_t NumBits) {
  assert(Res.getLLTTy(*getMRI()).isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");

  auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
  Res.addDefToMIB(*getMRI(), MIB);
  Op0.addSrcToMIB(MIB);
  MIB.addImm(NumBits);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}
MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}
MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}
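
// Example sketch: with a vector destination such as <4 x s32>,
// buildConstant(VecReg, 42) emits one scalar G_CONSTANT and splats it with a
// G_BUILD_VECTOR via buildSplatVector; with a scalar destination it emits a
// single G_CONSTANT. VecReg is an assumed placeholder name.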
MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}
MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
                                                const SrcOp &Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
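
// Usage sketch (illustrative names, assumed MachineFunction API shape): every
// G_LOAD/G_STORE carries an explicit MachineMemOperand describing size,
// alignment and ordering, e.g.
//   auto *MMO = MF.getMachineMemOperand(PtrInfo, MachineMemOperand::MOLoad,
//                                       /*Size=*/4, /*Align=*/4);
//   MIRBuilder.buildLoad(DstReg, AddrReg, *MMO);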
MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1,
                                                 const SrcOp &CarryIn) {
  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
                    {Op0, Op1, CarryIn});
}
MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}
unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}
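
// The chosen opcode mirrors how the target encodes booleans: 0/-1 targets get
// G_SEXT, 0/1 targets get G_ZEXT, and targets that leave the high bits
// undefined get G_ANYEXT.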
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
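
// Example sketch: buildSExtOrTrunc with an s64 destination and an s8 source
// emits G_SEXT, the reverse widths emit G_TRUNC, and matching types collapse
// to a plain COPY.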
MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}
void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
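
// Illustrative case: four s8 registers at offsets 0, 8, 16 and 24 packed into
// an s32 result hit the fast path above and become a single G_MERGE_VALUES;
// irregular offsets or mixed operand types fall back to the G_INSERT chain.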
MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}
MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
                                                  Register Op, unsigned Index) {
  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
             getMRI()->getType(Res).getSizeInBits() &&
         "insertion past the end of a register");

  if (getMRI()->getType(Res).getSizeInBits() ==
      getMRI()->getType(Op).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT)
      .addDef(Res)
      .addUse(Src)
      .addUse(Op)
      .addImm(Index);
}
MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
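
// Usage sketch (SomeID/DstReg/SrcReg are illustrative placeholders): source
// operands are appended by the caller after the intrinsic ID operand, e.g.
//   MIRBuilder.buildIntrinsic(SomeID, {DstReg}, /*HasSideEffects=*/false)
//       .addUse(SrcReg);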
MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}
MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMW(unsigned Opcode, const DstOp &OldValRes,
                                 const SrcOp &Addr, const SrcOp &Val,
                                 MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}
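
// All of the wrappers above delegate to buildAtomicRMW, which performs the
// shared operand and type checks; only the generic opcode differs.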
MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}
void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector())
      return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}