//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfo.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  MachineInstrBuilder MIB = BuildMI(getMF(), getDL(), getTII().get(Opcode));
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return buildInstr(TargetOpcode::DBG_VALUE)
      .addFrameIndex(FI)
      .addImm(0)
      .addMetadata(Variable)
      .addMetadata(Expr);
}
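// Emit a DBG_VALUE describing a constant: wide integers are attached as a
// CImm, narrow integers as a plain immediate, floats as an FP immediate, and
// any other constant is dropped and replaced with $noreg.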
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);
  if (auto *CI = dyn_cast<ConstantInt>(&C)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(&C)) {
    MIB.addFPImm(CFP);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder MachineIRBuilder::buildPtrAdd(const DstOp &Res,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1});
}
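// Like buildPtrAdd, except that a zero offset emits no instruction: Res is
// simply bound to Op0 and no G_PTR_ADD is created.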
Optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return None;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
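// Widen Op0 to the wider result vector type by unmerging its elements and
// padding the tail with G_IMPLICIT_DEF values before re-merging.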
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
         "Op0 has more elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  SmallVector<Register, 8> Regs;
  for (auto Op : Unmerge.getInstr()->defs())
    Regs.push_back(Op.getReg());
  Register Undef = buildUndef(Op0Ty.getElementType()).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMerge(Res, Regs);
}

MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMerge(Res, Regs);
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  if (Ty.isVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}
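// Map the target's boolean-contents convention to the matching extension
// opcode: sign-extend, zero-extend, or any-extend.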
unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}
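// Emit an extension, a truncation, or a plain COPY depending on how the sizes
// of Res and Op compare.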
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
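// Select COPY, G_PTRTOINT, G_INTTOPTR, or G_BITCAST based on whether the
// source and destination types are pointers or scalars.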
MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}
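// If the operands tile the result exactly and in order, emit a single merge;
// otherwise insert the values one by one through a chain of G_INSERTs.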
void MachineIRBuilder::buildSequence(Register Res, ArrayRef<Register> Ops,
                                     ArrayRef<uint64_t> Indices) {
#ifndef NDEBUG
  assert(Ops.size() == Indices.size() && "incompatible args");
  assert(!Ops.empty() && "invalid trivial sequence");
  assert(llvm::is_sorted(Indices) &&
         "sequence offsets must be in ascending order");

  assert(getMRI()->getType(Res).isValid() && "invalid operand type");
  for (auto Op : Ops)
    assert(getMRI()->getType(Op).isValid() && "invalid operand type");
#endif

  LLT ResTy = getMRI()->getType(Res);
  LLT OpTy = getMRI()->getType(Ops[0]);
  unsigned OpSize = OpTy.getSizeInBits();
  bool MaybeMerge = true;
  for (unsigned i = 0; i < Ops.size(); ++i) {
    if (getMRI()->getType(Ops[i]) != OpTy || Indices[i] != i * OpSize) {
      MaybeMerge = false;
      break;
    }
  }

  if (MaybeMerge && Ops.size() * OpSize == ResTy.getSizeInBits()) {
    buildMerge(Res, Ops);
    return;
  }

  Register ResIn = getMRI()->createGenericVirtualRegister(ResTy);
  buildUndef(ResIn);

  for (unsigned i = 0; i < Ops.size(); ++i) {
    Register ResOut = i + 1 == Ops.size()
                          ? Res
                          : getMRI()->createGenericVirtualRegister(ResTy);
    buildInsert(ResOut, ResIn, Ops[i], Indices[i]);
    ResIn = ResOut;
  }
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}
MachineInstrBuilder MachineIRBuilder::buildMerge(const DstOp &Res,
                                                 ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}
MachineInstrBuilder
MachineIRBuilder::buildMerge(const DstOp &Res,
                             std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, Ops);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}
MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}
MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}
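// Build a splat by inserting Src into element 0 of an undef vector and then
// shuffling it with an all-zero mask.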
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  (void)DstTy;
  (void)Src1Ty;
  (void)Src2Ty;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}
MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<Register> ResultRegs,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects) {
  auto MIB =
      buildInstr(HasSideEffects ? TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS
                                : TargetOpcode::G_INTRINSIC);
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildFPTrunc(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSelect(const DstOp &Res,
                                                  const SrcOp &Tst,
                                                  const SrcOp &Op0,
                                                  const SrcOp &Op1,
                                                  Optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
                                     MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
    .addImm(Ordering)
    .addImm(Scope);
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getNumElements() == DstTy.getNumElements() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(DstTy.getSizeInBits() > SrcTy.getSizeInBits() &&
           "invalid narrowing extend");
  else
    assert(DstTy.getSizeInBits() < SrcTy.getSizeInBits() &&
           "invalid widening trunc");
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
}
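// Generic entry point: validate operand counts and types for the given opcode
// (asserts only), then create the instruction, append the defs and uses, and
// apply any MI flags.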
MachineInstrBuilder MachineIRBuilder::buildInstr(unsigned Opc,
                                                 ArrayRef<DstOp> DstOps,
                                                 ArrayRef<SrcOp> SrcOps,
                                                 Optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(!SrcOps.empty() && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    if (SrcOps.size() == 1)
      return buildCast(DstOps[0], SrcOps[0]);
    if (DstOps[0].getLLTTy(*getMRI()).isVector()) {
      if (SrcOps[0].getLLTTy(*getMRI()).isVector())
        return buildInstr(TargetOpcode::G_CONCAT_VECTORS, DstOps, SrcOps);
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    }
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    if (SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
        DstOps[0].getLLTTy(*getMRI()).getElementType().getSizeInBits())
      return buildInstr(TargetOpcode::G_BUILD_VECTOR, DstOps, SrcOps);
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}