//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

void MachineIRBuilder::setMBB(MachineBasicBlock &MBB) {
  State.MBB = &MBB;
  State.II = MBB.end();
  assert(&getMF() == MBB.getParent() &&
         "Basic block is in a different function");
}

void MachineIRBuilder::setInstr(MachineInstr &MI) {
  assert(MI.getParent() && "Instruction is not part of a basic block");
  setMBB(*MI.getParent());
  State.II = MI.getIterator();
}

void MachineIRBuilder::setCSEInfo(GISelCSEInfo *Info) { State.CSEInfo = Info; }

void MachineIRBuilder::setInsertPt(MachineBasicBlock &MBB,
                                   MachineBasicBlock::iterator II) {
  assert(MBB.getParent() == &getMF() &&
         "Basic block is in a different function");
  State.MBB = &MBB;
  State.II = II;
}

void MachineIRBuilder::recordInsertion(MachineInstr *InsertedInstr) const {
  if (State.Observer)
    State.Observer->createdInstr(*InsertedInstr);
}

void MachineIRBuilder::setChangeObserver(GISelChangeObserver &Observer) {
  State.Observer = &Observer;
}

void MachineIRBuilder::stopObservingChanges() { State.Observer = nullptr; }
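
// A minimal usage sketch of the state setters above (`B`, `MF`, `S32`, and
// the operand registers are illustrative names, not part of this file):
//
//   MachineIRBuilder B;
//   B.setMF(MF);                          // bind MRI/TII/DL for the function
//   B.setMBB(*MF.begin());                // insert at the end of a block
//   auto Sum = B.buildAdd(S32, Op0, Op1); // subsequent builds land there
//
// While a GISelChangeObserver is registered via setChangeObserver, every
// instruction created through insertInstr is reported to it; see
// recordInsertion above.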

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opcode) {
  return insertInstr(buildInstrNoInsert(Opcode));
}

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  // DBG_VALUE insts now carry IR-level indirection in their DIExpression
  // rather than encoding it in the instruction itself.
  const DIExpression *DIExpr = cast<DIExpression>(Expr);
  DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, DIExpr));
}
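
// For reference, the indirect form built above should print roughly as
// (a sketch, assuming a pointer vreg %0 and a variable metadata node !12):
//
//   DBG_VALUE %0, $noreg, !12, !DIExpression(DW_OP_deref)
//
// i.e. the dereference is carried by the expression operand, matching the
// comment above.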

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  // DBG_VALUE insts now carry IR-level indirection in their DIExpression
  // rather than encoding it in the instruction itself.
  const DIExpression *DIExpr = cast<DIExpression>(Expr);
  DIExpr = DIExpression::append(DIExpr, {dwarf::DW_OP_deref});
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(DIExpr);
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert %noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(0U);
  }

  return MIB.addReg(0).addMetadata(Variable).addMetadata(Expr);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         unsigned Align) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Align);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
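
// G_JUMP_TABLE and G_BRJT are normally emitted together when lowering a
// switch; a hedged sketch, where JTI comes from MachineJumpTableInfo and
// IndexReg holds the zero-based case index (illustrative names):
//
//   auto Table = B.buildJumpTable(PtrTy, JTI);   // address of the table
//   B.buildBrJT(Table.getReg(0), JTI, IndexReg); // indexed branch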

void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0,
                                        const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0,
                                       const LLT &Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildGEP(const DstOp &Res,
                                               const SrcOp &Op0,
                                               const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).isScalar() && "invalid offset type");

  return buildInstr(TargetOpcode::G_GEP, {Res}, {Op0, Op1});
}

Optional<MachineInstrBuilder>
MachineIRBuilder::materializeGEP(Register &Res, Register Op0,
                                 const LLT &ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildGEP(Res, Op0, Cst.getReg(0));
}
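
// materializeGEP differs from buildGEP in that it may emit nothing; a sketch
// of the calling convention (illustrative names), with Res starting out
// invalid as the assert above requires:
//
//   Register Res;  // must be 0 on entry
//   if (auto GEP = B.materializeGEP(Res, BasePtr, LLT::scalar(64), Offset)) {
//     // *GEP is the new G_GEP; Res is its result register.
//   } else {
//     // Offset was 0: Res aliases BasePtr and no instruction was built.
//   }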

MachineInstrBuilder MachineIRBuilder::buildPtrMask(const DstOp &Res,
                                                   const SrcOp &Op0,
                                                   uint32_t NumBits) {
  assert(Res.getLLTTy(*getMRI()).isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");

  auto MIB = buildInstr(TargetOpcode::G_PTR_MASK);
  Res.addDefToMIB(*getMRI(), MIB);
  Op0.addSrcToMIB(MIB);
  MIB.addImm(NumBits);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(Register Tst,
                                                  MachineBasicBlock &Dest) {
  assert(getMRI()->getType(Tst).isScalar() && "invalid operand type");

  return buildInstr(TargetOpcode::G_BRCOND).addUse(Tst).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildLoad(const DstOp &Res,
                                                const SrcOp &Addr,
                                                MachineMemOperand &MMO) {
  return buildLoadInstr(TargetOpcode::G_LOAD, Res, Addr, MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildUAddo(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UADDO, {Res, CarryOut}, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUAdde(const DstOp &Res,
                                                 const DstOp &CarryOut,
                                                 const SrcOp &Op0,
                                                 const SrcOp &Op1,
                                                 const SrcOp &CarryIn) {
  return buildInstr(TargetOpcode::G_UADDE, {Res, CarryOut},
                    {Op0, Op1, CarryIn});
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
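
// E.g. a target reporting ZeroOrNegativeOneBooleanContent for vector
// compares gets G_SEXT here, so a "true" lane stays all-ones after widening;
// buildBoolExt below is the usual way to consume this result.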

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
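
// The dispatch above means callers can be size-agnostic; a sketch with
// illustrative s32/s64 operands:
//
//   B.buildSExtOrTrunc(S32Def, S64Val); // sizes shrink -> G_TRUNC
//   B.buildSExtOrTrunc(S64Def, S32Val); // sizes grow   -> G_SEXT
//   B.buildSExtOrTrunc(S32Def, S32Val); // sizes match  -> COPY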

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(std::is_sorted(Indices.begin(), Indices.end()) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}
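
// E.g. two s32 pieces at offsets {0, 32} forming an s64 take the
// G_MERGE_VALUES fast path; anything non-contiguous falls back to the
// G_INSERT chain (a sketch with illustrative registers):
//
//   B.buildSequence(S64Res, {Lo, Hi}, {0, 32}); // -> single G_MERGE_VALUES
//   B.buildSequence(S64Res, {Lo}, {8});         // -> G_IMPLICIT_DEF + G_INSERT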

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<Register, 8> TmpVec;
  for (unsigned I = 0; I != NumReg; ++I)
    TmpVec.push_back(getMRI()->createGenericVirtualRegister(Res));
  return buildUnmerge(TmpVec, Op);
}
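
// This overload invents the result registers itself; unmerging an s64 into
// s32 halves, for instance, yields NumReg = 64 / 32 = 2 new vregs (sketch):
//
//   auto Parts = B.buildUnmerge(LLT::scalar(32), S64Val);
//   Register Lo = Parts.getReg(0);   // low bits come first
//   Register Hi = Parts.getReg(1);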

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
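
// A splat is simply a G_BUILD_VECTOR repeating one source; for a <4 x s32>
// destination the TmpVec above holds four copies of Src (sketch):
//
//   auto Splat = B.buildSplatVector(LLT::vector(4, 32), ScalarReg);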

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(Register Res, Register Src,
                                                  Register Op, unsigned Index) {
  assert(Index + getMRI()->getType(Op).getSizeInBits() <=
             getMRI()->getType(Res).getSizeInBits() &&
         "insertion past the end of a register");

  if (getMRI()->getType(Res).getSizeInBits() ==
      getMRI()->getType(Op).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT)
      .addDef(Res)
      .addUse(Src)
      .addUse(Op)
      .addImm(Index);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}
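
// Compare-then-select is the typical pairing; a sketch with illustrative
// operands (the test must be s1, or a vector of s1 for vector selects):
//
//   auto Cond = B.buildICmp(CmpInst::ICMP_SLT, LLT::scalar(1), LHS, RHS);
//   auto Min  = B.buildSelect(S32, Cond, LHS, RHS);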

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
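
// All of the buildAtomicRMW* wrappers below funnel into this helper; the MMO
// must be flagged atomic or the assert above fires. A hedged sketch of
// setting one up (illustrative operands):
//
//   auto *MMO = MF.getMachineMemOperand(
//       MachinePointerInfo(), MachineMemOperand::MOLoad |
//       MachineMemOperand::MOStore, 4, 4, AAMDNodes(), nullptr,
//       SyncScope::System, AtomicOrdering::Monotonic);
//   B.buildAtomicRMWAdd(OldVal, Addr, Val, *MMO);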

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
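
// Ordering and Scope are the raw integer encodings of AtomicOrdering and a
// SyncScope::ID; e.g. a sequentially consistent, system-wide fence (sketch):
//
//   B.buildFence(static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//                SyncScope::System);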

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
#ifndef NDEBUG
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");
#endif

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT &DstTy, const LLT &SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT &ResTy, const LLT &TstTy,
                                        const LLT &Op0Ty, const LLT &Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(std::all_of(DstOps.begin(), DstOps.end(),
                       [&, this](const DstOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                DstOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in output list");
    assert(DstOps.size() * DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return Op.getLLTTy(*getMRI()) ==
                                SrcOps[0].getLLTTy(*getMRI());
                       }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 && "Must have at least 2 operands");
    assert(std::all_of(SrcOps.begin(), SrcOps.end(),
                       [&, this](const SrcOp &Op) {
                         return (Op.getLLTTy(*getMRI()).isVector() &&
                                 Op.getLLTTy(*getMRI()) ==
                                     SrcOps[0].getLLTTy(*getMRI()));
                       }) &&
           "type mismatch in input list");
    assert(SrcOps.size() * SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}