//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;
void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}
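// A minimal usage sketch (editor's note, not part of the original file): a
// client normally binds the builder to a function with setMF() and then picks
// an insertion point before building anything; setInsertPt() is assumed to be
// the setter declared in MachineIRBuilder.h.
//
//   MachineIRBuilder MIRBuilder;
//   MIRBuilder.setMF(MF);                     // bind per-function state
//   MIRBuilder.setInsertPt(MBB, MBB.begin()); // choose where new MIs go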
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------
MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections()}, getTII().get(Opcode));
}
MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}
MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}
MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}
MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}
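// Editor's note: the constant value is encoded directly on the DBG_VALUE --
// small integers as plain immediates, wide integers as CImm operands,
// floating-point constants as FPImm, null pointers as 0 -- and anything that
// cannot be expressed is dropped and replaced by $noreg so the variable reads
// as optimized out.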
MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");

  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);
  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}
void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}
MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).getScalarType().isPointer() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}
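// Usage sketch (editor's note, assumed caller-side code): materialize a
// pointer ByteOff bytes past BasePtr; when the offset is zero no instruction
// is emitted and NewPtr simply aliases BasePtr.
//
//   Register NewPtr;
//   if (auto PtrAdd = MIRBuilder.materializePtrAdd(NewPtr, BasePtr,
//                                                  LLT::scalar(64), ByteOff)) {
//     // A G_CONSTANT + G_PTR_ADD pair was emitted; NewPtr holds the result.
//   }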
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}
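// Worked example (editor's note): with NumBits == 12 on a 64-bit pointer,
// maskTrailingZeros<uint64_t>(12) == 0xFFFFFFFFFFFFF000, so the emitted
// G_PTRMASK clears the low 12 bits of Op0, e.g. to align an address down to a
// 4 KiB boundary.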
MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }

  Register Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}
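// Example (editor's note): widening a <2 x s32> value to <4 x s32> unmerges
// the two s32 elements, appends two s32 G_IMPLICIT_DEF registers, and then
// rebuilds the wider value with a merge-like instruction (a G_BUILD_VECTOR in
// this case).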
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert((ResTy.isVector() && Op0Ty.isVector()) && "Non vector type");
  assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
         "Different vector element types");
  assert((ResTy.getNumElements() < Op0Ty.getNumElements()) &&
         "Op0 has fewer elements");

  SmallVector<Register, 8> Regs;
  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}
MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}
MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}
MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}
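// Example (editor's note): requesting a constant with a fixed-vector type such
// as <4 x s32> first builds a single s32 G_CONSTANT and then splats it through
// buildSplatVector (a G_BUILD_VECTOR); a scalar request produces one
// G_CONSTANT whose def carries the requested type directly.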
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics())
         == EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}
MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}
MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}
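// Usage sketch (editor's note, assumed caller-side code): load 8 bytes at
// offset 16 from an existing memory operand.
//
//   auto Hi = MIRBuilder.buildLoadFromOffset(LLT::scalar(64), BasePtr,
//                                            BaseMMO, /*Offset=*/16);
//
// For a non-zero offset this expands to G_CONSTANT + G_PTR_ADD + G_LOAD with a
// memory operand derived from BaseMMO at the requested offset.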
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op);
}
unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}
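// Editor's note: this mirrors the target's BooleanContents setting --
// ZeroOrNegativeOne booleans are widened with G_SEXT, ZeroOrOne with G_ZEXT,
// and undefined boolean contents fall back to G_ANYEXT.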
MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}
MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}
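// Example (editor's note): with ExtOpc == G_ZEXT, an s1 source widened to s32
// emits G_ZEXT, an s64 source narrowed to s32 emits G_TRUNC, and identical
// source and destination types collapse to a plain COPY.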
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}
MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointer() && DstTy.isScalar())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointer() && SrcTy.isScalar())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointer() && !DstTy.isPointer() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}
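// Editor's note: opcode selection here is purely type-driven -- identical
// types become a COPY, pointer-to-scalar is G_PTRTOINT, scalar-to-pointer is
// G_INTTOPTR, and any remaining same-sized combination is G_BITCAST;
// pointer-to-pointer address-space casts are deliberately not handled.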
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "insertion past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}
MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<SrcOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}
unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}
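// Editor's note: the merge-like opcode is picked from the destination and
// first-source types -- a vector built from vectors uses G_CONCAT_VECTORS, a
// vector built from scalars uses G_BUILD_VECTOR, and a scalar destination
// uses G_MERGE_VALUES.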
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res.begin(), Res.end());
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}
MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}
MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  assert((size_t)(Src1Ty.getNumElements() + Src2Ty.getNumElements()) >=
         Mask.size());
  assert(DstTy.getElementType() == Src1Ty.getElementType() &&
         DstTy.getElementType() == Src2Ty.getElementType());
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}
MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops.begin(), Ops.end());
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
         Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}
static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}
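// Editor's note, the mapping as a small truth table:
//   side effects + convergent -> G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS
//   side effects only         -> G_INTRINSIC_W_SIDE_EFFECTS
//   convergent only           -> G_INTRINSIC_CONVERGENT
//   neither                   -> G_INTRINSIC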
MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildTrunc(const DstOp &Res,
                                                 const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}
MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}
MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    Register OldValRes, Register SuccessRes, Register Addr, Register CmpVal,
    Register NewVal, MachineMemOperand &MMO) {
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT SuccessResTy = getMRI()->getType(SuccessRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS)
      .addDef(OldValRes)
      .addDef(SuccessRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(Register OldValRes, Register Addr,
                                     Register CmpVal, Register NewVal,
                                     MachineMemOperand &MMO) {
  LLT OldValResTy = getMRI()->getType(OldValRes);
  LLT AddrTy = getMRI()->getType(Addr);
  LLT CmpValTy = getMRI()->getType(CmpVal);
  LLT NewValTy = getMRI()->getType(NewVal);
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");

  return buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG)
      .addDef(OldValRes)
      .addUse(Addr)
      .addUse(CmpVal)
      .addUse(NewVal)
      .addMemOperand(&MMO);
}
MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}
MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}
MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}
MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}
void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
}
void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getNumElements() == Op0Ty.getNumElements())) &&
           "type mismatch");
}
MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      else
        return DstTy.isVector() &&
               DstTy.getNumElements() == Op0Ty.getNumElements();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getNumElements() ==
               SrcOps[0].getLLTTy(*getMRI()).getNumElements() &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input scalars do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert((!SrcOps.empty() || SrcOps.size() < 2) &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }
  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}