//===-- llvm/CodeGen/GlobalISel/MachineIRBuilder.cpp - MIBuilder--*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the MachineIRBuilder class.
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DebugInfoMetadata.h"

using namespace llvm;

void MachineIRBuilder::setMF(MachineFunction &MF) {
  State.MF = &MF;
  State.MBB = nullptr;
  State.MRI = &MF.getRegInfo();
  State.TII = MF.getSubtarget().getInstrInfo();
  State.DL = DebugLoc();
  State.PCSections = nullptr;
  State.MMRA = nullptr;
  State.II = MachineBasicBlock::iterator();
  State.Observer = nullptr;
}

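// Example (illustrative sketch; `MF` is a hypothetical MachineFunction):
//   MachineIRBuilder B;
//   B.setMF(MF);            // bind per-function state (MRI, TII, ...)
//   B.setMBB(*MF.begin());  // pick a block; new instructions append there
//   auto C = B.buildConstant(LLT::scalar(32), 42);
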
//------------------------------------------------------------------------------
// Build instruction variants.
//------------------------------------------------------------------------------

MachineInstrBuilder MachineIRBuilder::buildInstrNoInsert(unsigned Opcode) {
  return BuildMI(getMF(), {getDL(), getPCSections(), getMMRAMetadata()},
                 getTII().get(Opcode));
}

MachineInstrBuilder MachineIRBuilder::insertInstr(MachineInstrBuilder MIB) {
  getMBB().insert(getInsertPt(), MIB);
  recordInsertion(MIB);
  return MIB;
}

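// Example (illustrative sketch; `B`, `SomeVReg` are hypothetical):
// buildInstrNoInsert/insertInstr let an instruction be assembled before it is
// committed to a block:
//   auto MIB = B.buildInstrNoInsert(TargetOpcode::G_IMPLICIT_DEF);
//   MIB.addDef(SomeVReg);      // add operands before insertion
//   B.insertInstr(MIB);        // placed at the current insertion point
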
MachineInstrBuilder
MachineIRBuilder::buildDirectDbgValue(Register Reg, const MDNode *Variable,
                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ false, Reg, Variable, Expr));
}

MachineInstrBuilder
MachineIRBuilder::buildIndirectDbgValue(Register Reg, const MDNode *Variable,
                                        const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(BuildMI(getMF(), getDL(),
                             getTII().get(TargetOpcode::DBG_VALUE),
                             /*IsIndirect*/ true, Reg, Variable, Expr));
}

MachineInstrBuilder MachineIRBuilder::buildFIDbgValue(int FI,
                                                      const MDNode *Variable,
                                                      const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  return insertInstr(buildInstrNoInsert(TargetOpcode::DBG_VALUE)
                         .addFrameIndex(FI)
                         .addImm(0)
                         .addMetadata(Variable)
                         .addMetadata(Expr));
}

MachineInstrBuilder MachineIRBuilder::buildConstDbgValue(const Constant &C,
                                                         const MDNode *Variable,
                                                         const MDNode *Expr) {
  assert(isa<DILocalVariable>(Variable) && "not a variable");
  assert(cast<DIExpression>(Expr)->isValid() && "not an expression");
  assert(
      cast<DILocalVariable>(Variable)->isValidLocationForIntrinsic(getDL()) &&
      "Expected inlined-at fields to agree");
  auto MIB = buildInstrNoInsert(TargetOpcode::DBG_VALUE);

  // Look through an int-to-pointer cast so the numeric payload can still be
  // encoded as an immediate.
  auto *NumericConstant = [&]() -> const Constant * {
    if (const auto *CE = dyn_cast<ConstantExpr>(&C))
      if (CE->getOpcode() == Instruction::IntToPtr)
        return CE->getOperand(0);
    return &C;
  }();

  if (auto *CI = dyn_cast<ConstantInt>(NumericConstant)) {
    if (CI->getBitWidth() > 64)
      MIB.addCImm(CI);
    else
      MIB.addImm(CI->getZExtValue());
  } else if (auto *CFP = dyn_cast<ConstantFP>(NumericConstant)) {
    MIB.addFPImm(CFP);
  } else if (isa<ConstantPointerNull>(NumericConstant)) {
    MIB.addImm(0);
  } else {
    // Insert $noreg if we didn't find a usable constant and had to drop it.
    MIB.addReg(Register());
  }

  MIB.addImm(0).addMetadata(Variable).addMetadata(Expr);
  return insertInstr(MIB);
}

MachineInstrBuilder MachineIRBuilder::buildDbgLabel(const MDNode *Label) {
  assert(isa<DILabel>(Label) && "not a label");
  assert(cast<DILabel>(Label)->isValidLocationForIntrinsic(State.DL) &&
         "Expected inlined-at fields to agree");
  auto MIB = buildInstr(TargetOpcode::DBG_LABEL);

  return MIB.addMetadata(Label);
}

MachineInstrBuilder MachineIRBuilder::buildDynStackAlloc(const DstOp &Res,
                                                         const SrcOp &Size,
                                                         Align Alignment) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "expected ptr dst type");
  auto MIB = buildInstr(TargetOpcode::G_DYN_STACKALLOC);
  Res.addDefToMIB(*getMRI(), MIB);
  Size.addSrcToMIB(MIB);
  MIB.addImm(Alignment.value());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildFrameIndex(const DstOp &Res,
                                                      int Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_FRAME_INDEX);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addFrameIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildGlobalValue(const DstOp &Res,
                                                       const GlobalValue *GV) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  assert(Res.getLLTTy(*getMRI()).getAddressSpace() ==
             GV->getType()->getAddressSpace() &&
         "address space mismatch");

  auto MIB = buildInstr(TargetOpcode::G_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addGlobalAddress(GV);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildConstantPool(const DstOp &Res,
                                                        unsigned Idx) {
  assert(Res.getLLTTy(*getMRI()).isPointer() && "invalid operand type");
  auto MIB = buildInstr(TargetOpcode::G_CONSTANT_POOL);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addConstantPoolIndex(Idx);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildJumpTable(const LLT PtrTy,
                                                     unsigned JTI) {
  return buildInstr(TargetOpcode::G_JUMP_TABLE, {PtrTy}, {})
      .addJumpTableIndex(JTI);
}

void MachineIRBuilder::validateUnaryOp(const LLT Res, const LLT Op0) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

void MachineIRBuilder::validateBinaryOp(const LLT Res, const LLT Op0,
                                        const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0 && Res == Op1) && "type mismatch");
}

void MachineIRBuilder::validateShiftOp(const LLT Res, const LLT Op0,
                                       const LLT Op1) {
  assert((Res.isScalar() || Res.isVector()) && "invalid operand type");
  assert((Res == Op0) && "type mismatch");
}

MachineInstrBuilder
MachineIRBuilder::buildPtrAdd(const DstOp &Res, const SrcOp &Op0,
                              const SrcOp &Op1, std::optional<unsigned> Flags) {
  assert(Res.getLLTTy(*getMRI()).isPointerOrPointerVector() &&
         Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch");
  assert(Op1.getLLTTy(*getMRI()).getScalarType().isScalar() &&
         "invalid offset type");

  return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}, Flags);
}

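// Example (illustrative sketch; `B`, `Base` are hypothetical): addressing a
// field at byte offset 8 in a 64-bit address space 0:
//   LLT P0 = LLT::pointer(0, 64);
//   auto Off = B.buildConstant(LLT::scalar(64), 8);
//   auto Addr = B.buildPtrAdd(P0, Base, Off);  // G_PTR_ADD %base, %off
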
std::optional<MachineInstrBuilder>
MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0,
                                    const LLT ValueTy, uint64_t Value) {
  assert(Res == 0 && "Res is a result argument");
  assert(ValueTy.isScalar() && "invalid offset type");

  // A zero offset needs no G_PTR_ADD; reuse the base register directly.
  if (Value == 0) {
    Res = Op0;
    return std::nullopt;
  }

  Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0));
  auto Cst = buildConstant(ValueTy, Value);
  return buildPtrAdd(Res, Op0, Cst.getReg(0));
}

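// Example (illustrative sketch; `Base`, `Offset` are hypothetical): callers
// must handle the std::nullopt case, which means no instruction was emitted:
//   Register Sum;
//   if (auto MIB = B.materializePtrAdd(Sum, Base, LLT::scalar(64), Offset))
//     /* Sum is defined by *MIB */;
//   else
//     /* Offset was 0; Sum aliases Base */;
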
MachineInstrBuilder MachineIRBuilder::buildMaskLowPtrBits(const DstOp &Res,
                                                          const SrcOp &Op0,
                                                          uint32_t NumBits) {
  LLT PtrTy = Res.getLLTTy(*getMRI());
  LLT MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  Register MaskReg = getMRI()->createGenericVirtualRegister(MaskTy);
  buildConstant(MaskReg, maskTrailingZeros<uint64_t>(NumBits));
  return buildPtrMask(Res, Op0, MaskReg);
}

MachineInstrBuilder
MachineIRBuilder::buildPadVectorWithUndefElements(const DstOp &Res,
                                                  const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(ResTy.isVector() && "Res non vector type");

  SmallVector<Register, 8> Regs;
  if (Op0Ty.isVector()) {
    assert((ResTy.getElementType() == Op0Ty.getElementType()) &&
           "Different vector element types");
    assert((ResTy.getNumElements() > Op0Ty.getNumElements()) &&
           "Op0 has more elements");
    auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);

    for (auto Op : Unmerge.getInstr()->defs())
      Regs.push_back(Op.getReg());
  } else {
    assert((ResTy.getSizeInBits() > Op0Ty.getSizeInBits()) &&
           "Op0 has more size");
    Regs.push_back(Op0.getReg());
  }

  auto Undef =
      buildUndef(Op0Ty.isVector() ? Op0Ty.getElementType() : Op0Ty).getReg(0);
  unsigned NumberOfPadElts = ResTy.getNumElements() - Regs.size();
  for (unsigned i = 0; i < NumberOfPadElts; ++i)
    Regs.push_back(Undef);
  return buildMergeLikeInstr(Res, Regs);
}

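// Example (illustrative sketch): padding <2 x s32> out to <4 x s32> appends
// undef lanes:
//   %lo:_(s32), %hi:_(s32) = G_UNMERGE_VALUES %src(<2 x s32>)
//   %u:_(s32) = G_IMPLICIT_DEF
//   %dst:_(<4 x s32>) = G_BUILD_VECTOR %lo, %hi, %u, %u
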
MachineInstrBuilder
MachineIRBuilder::buildDeleteTrailingVectorElements(const DstOp &Res,
                                                    const SrcOp &Op0) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  LLT Op0Ty = Op0.getLLTTy(*getMRI());

  assert(Op0Ty.isVector() && "Non vector type");
  assert(((ResTy.isScalar() && (ResTy == Op0Ty.getElementType())) ||
          (ResTy.isVector() &&
           (ResTy.getElementType() == Op0Ty.getElementType()))) &&
         "Different vector element types");
  assert(
      (ResTy.isScalar() || (ResTy.getNumElements() < Op0Ty.getNumElements())) &&
      "Op0 has fewer elements");

  auto Unmerge = buildUnmerge(Op0Ty.getElementType(), Op0);
  if (ResTy.isScalar())
    return buildCopy(Res, Unmerge.getReg(0));
  SmallVector<Register, 8> Regs;
  for (unsigned i = 0; i < ResTy.getNumElements(); ++i)
    Regs.push_back(Unmerge.getReg(i));
  return buildMergeLikeInstr(Res, Regs);
}

MachineInstrBuilder MachineIRBuilder::buildBr(MachineBasicBlock &Dest) {
  return buildInstr(TargetOpcode::G_BR).addMBB(&Dest);
}

MachineInstrBuilder MachineIRBuilder::buildBrIndirect(Register Tgt) {
  assert(getMRI()->getType(Tgt).isPointer() && "invalid branch destination");
  return buildInstr(TargetOpcode::G_BRINDIRECT).addUse(Tgt);
}

MachineInstrBuilder MachineIRBuilder::buildBrJT(Register TablePtr,
                                                unsigned JTI,
                                                Register IndexReg) {
  assert(getMRI()->getType(TablePtr).isPointer() &&
         "Table reg must be a pointer");
  return buildInstr(TargetOpcode::G_BRJT)
      .addUse(TablePtr)
      .addJumpTableIndex(JTI)
      .addUse(IndexReg);
}

MachineInstrBuilder MachineIRBuilder::buildCopy(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::COPY, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const ConstantInt &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();
  assert(EltTy.getScalarSizeInBits() == Val.getBitWidth() &&
         "creating constant with the wrong size");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_CONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addCImm(&Val);
    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_CONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addCImm(&Val);
  return Const;
}

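// Example (illustrative sketch): asking for a fixed-vector constant splats a
// scalar G_CONSTANT through G_BUILD_VECTOR:
//   auto V = B.buildConstant(LLT::fixed_vector(4, 32), 7);
//   // %c:_(s32) = G_CONSTANT i32 7
//   // %v:_(<4 x s32>) = G_BUILD_VECTOR %c, %c, %c, %c
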
MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    int64_t Val) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, Val, true);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const ConstantFP &Val) {
  LLT Ty = Res.getLLTTy(*getMRI());
  LLT EltTy = Ty.getScalarType();

  assert(APFloat::getSizeInBits(Val.getValueAPF().getSemantics()) ==
             EltTy.getSizeInBits() &&
         "creating fconstant with the wrong size");

  assert(!Ty.isPointer() && "invalid operand type");

  assert(!Ty.isScalableVector() &&
         "unexpected scalable vector in buildFConstant");

  if (Ty.isFixedVector()) {
    auto Const = buildInstr(TargetOpcode::G_FCONSTANT)
                     .addDef(getMRI()->createGenericVirtualRegister(EltTy))
                     .addFPImm(&Val);

    return buildSplatBuildVector(Res, Const);
  }

  auto Const = buildInstr(TargetOpcode::G_FCONSTANT);
  Const->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), Const);
  Const.addFPImm(&Val);
  return Const;
}

MachineInstrBuilder MachineIRBuilder::buildConstant(const DstOp &Res,
                                                    const APInt &Val) {
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(), Val);
  return buildConstant(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     double Val) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP =
      ConstantFP::get(Ctx, getAPFloatFromSize(Val, DstTy.getScalarSizeInBits()));
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder MachineIRBuilder::buildFConstant(const DstOp &Res,
                                                     const APFloat &Val) {
  auto &Ctx = getMF().getFunction().getContext();
  auto *CFP = ConstantFP::get(Ctx, Val);
  return buildFConstant(Res, *CFP);
}

MachineInstrBuilder
MachineIRBuilder::buildConstantPtrAuth(const DstOp &Res,
                                       const ConstantPtrAuth *CPA,
                                       Register Addr, Register AddrDisc) {
  auto MIB = buildInstr(TargetOpcode::G_PTRAUTH_GLOBAL_VALUE);
  Res.addDefToMIB(*getMRI(), MIB);
  MIB.addUse(Addr);
  MIB.addImm(CPA->getKey()->getZExtValue());
  MIB.addUse(AddrDisc);
  MIB.addImm(CPA->getDiscriminator()->getZExtValue());
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildBrCond(const SrcOp &Tst,
                                                  MachineBasicBlock &Dest) {
  assert(Tst.getLLTTy(*getMRI()).isScalar() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_BRCOND);
  Tst.addSrcToMIB(MIB);
  MIB.addMBB(&Dest);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildLoad(const DstOp &Dst, const SrcOp &Addr,
                            MachinePointerInfo PtrInfo, Align Alignment,
                            MachineMemOperand::Flags MMOFlags,
                            const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOLoad;
  assert((MMOFlags & MachineMemOperand::MOStore) == 0);

  LLT Ty = Dst.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildLoad(Dst, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildLoadInstr(unsigned Opcode,
                                                     const DstOp &Res,
                                                     const SrcOp &Addr,
                                                     MachineMemOperand &MMO) {
  assert(Res.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(Opcode);
  Res.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildLoadFromOffset(
    const DstOp &Dst, const SrcOp &BasePtr,
    MachineMemOperand &BaseMMO, int64_t Offset) {
  LLT LoadTy = Dst.getLLTTy(*getMRI());
  MachineMemOperand *OffsetMMO =
      getMF().getMachineMemOperand(&BaseMMO, Offset, LoadTy);

  if (Offset == 0) // This may be a size or type changing load.
    return buildLoad(Dst, BasePtr, *OffsetMMO);

  LLT PtrTy = BasePtr.getLLTTy(*getMRI());
  LLT OffsetTy = LLT::scalar(PtrTy.getSizeInBits());
  auto ConstOffset = buildConstant(OffsetTy, Offset);
  auto Ptr = buildPtrAdd(PtrTy, BasePtr, ConstOffset);
  return buildLoad(Dst, Ptr, *OffsetMMO);
}

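// Example (illustrative sketch; `Base`, `BaseMMO` are hypothetical): loading
// an s32 at byte offset 4 derives a narrower MMO from the base access:
//   auto Hi = B.buildLoadFromOffset(LLT::scalar(32), Base, BaseMMO, 4);
//   // emits G_CONSTANT 4, G_PTR_ADD, then G_LOAD with the offset MMO
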
MachineInstrBuilder MachineIRBuilder::buildStore(const SrcOp &Val,
                                                 const SrcOp &Addr,
                                                 MachineMemOperand &MMO) {
  assert(Val.getLLTTy(*getMRI()).isValid() && "invalid operand type");
  assert(Addr.getLLTTy(*getMRI()).isPointer() && "invalid operand type");

  auto MIB = buildInstr(TargetOpcode::G_STORE);
  Val.addSrcToMIB(MIB);
  Addr.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildStore(const SrcOp &Val, const SrcOp &Addr,
                             MachinePointerInfo PtrInfo, Align Alignment,
                             MachineMemOperand::Flags MMOFlags,
                             const AAMDNodes &AAInfo) {
  MMOFlags |= MachineMemOperand::MOStore;
  assert((MMOFlags & MachineMemOperand::MOLoad) == 0);

  LLT Ty = Val.getLLTTy(*getMRI());
  MachineMemOperand *MMO =
      getMF().getMachineMemOperand(PtrInfo, MMOFlags, Ty, Alignment, AAInfo);
  return buildStore(Val, Addr, *MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExt(const DstOp &Res,
                                                  const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildSExt(const DstOp &Res,
                                                const SrcOp &Op) {
  return buildInstr(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExt(const DstOp &Res,
                                                const SrcOp &Op,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ZEXT, Res, Op, Flags);
}

unsigned MachineIRBuilder::getBoolExtOp(bool IsVec, bool IsFP) const {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVec, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return TargetOpcode::G_SEXT;
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return TargetOpcode::G_ZEXT;
  default:
    return TargetOpcode::G_ANYEXT;
  }
}

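// Example (illustrative sketch; `CmpRes` is a hypothetical s1 value): widen a
// boolean the way the target expects, e.g. G_ZEXT on a zero-or-one target:
//   unsigned ExtOp = B.getBoolExtOp(/*IsVec=*/false, /*IsFP=*/false);
//   auto Wide = B.buildInstr(ExtOp, {LLT::scalar(32)}, {CmpRes});
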
MachineInstrBuilder MachineIRBuilder::buildBoolExt(const DstOp &Res,
                                                   const SrcOp &Op,
                                                   bool IsFP) {
  unsigned ExtOp = getBoolExtOp(getMRI()->getType(Op.getReg()).isVector(), IsFP);
  return buildInstr(ExtOp, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBoolExtInReg(const DstOp &Res,
                                                        const SrcOp &Op,
                                                        bool IsVector,
                                                        bool IsFP) {
  const auto *TLI = getMF().getSubtarget().getTargetLowering();
  switch (TLI->getBooleanContents(IsVector, IsFP)) {
  case TargetLoweringBase::ZeroOrNegativeOneBooleanContent:
    return buildSExtInReg(Res, Op, 1);
  case TargetLoweringBase::ZeroOrOneBooleanContent:
    return buildZExtInReg(Res, Op, 1);
  case TargetLoweringBase::UndefinedBooleanContent:
    return buildCopy(Res, Op);
  }

  llvm_unreachable("unexpected BooleanContent");
}

MachineInstrBuilder MachineIRBuilder::buildExtOrTrunc(unsigned ExtOpc,
                                                      const DstOp &Res,
                                                      const SrcOp &Op) {
  assert((TargetOpcode::G_ANYEXT == ExtOpc || TargetOpcode::G_ZEXT == ExtOpc ||
          TargetOpcode::G_SEXT == ExtOpc) &&
         "Expecting Extending Opc");
  assert(Res.getLLTTy(*getMRI()).isScalar() ||
         Res.getLLTTy(*getMRI()).isVector());
  assert(Res.getLLTTy(*getMRI()).isScalar() ==
         Op.getLLTTy(*getMRI()).isScalar());

  unsigned Opcode = TargetOpcode::COPY;
  if (Res.getLLTTy(*getMRI()).getSizeInBits() >
      Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = ExtOpc;
  else if (Res.getLLTTy(*getMRI()).getSizeInBits() <
           Op.getLLTTy(*getMRI()).getSizeInBits())
    Opcode = TargetOpcode::G_TRUNC;
  else
    assert(Res.getLLTTy(*getMRI()) == Op.getLLTTy(*getMRI()));

  return buildInstr(Opcode, Res, Op);
}

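// Example (illustrative sketch; `Src32` is a hypothetical s32 value): the
// opcode is chosen from the relative sizes, so one call handles all cases:
//   B.buildSExtOrTrunc(LLT::scalar(64), Src32);  // emits G_SEXT
//   B.buildSExtOrTrunc(LLT::scalar(16), Src32);  // emits G_TRUNC
//   B.buildSExtOrTrunc(LLT::scalar(32), Src32);  // emits COPY
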
MachineInstrBuilder MachineIRBuilder::buildSExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_SEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtOrTrunc(const DstOp &Res,
                                                       const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ZEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildAnyExtOrTrunc(const DstOp &Res,
                                                         const SrcOp &Op) {
  return buildExtOrTrunc(TargetOpcode::G_ANYEXT, Res, Op);
}

MachineInstrBuilder MachineIRBuilder::buildZExtInReg(const DstOp &Res,
                                                     const SrcOp &Op,
                                                     int64_t ImmOp) {
  LLT ResTy = Res.getLLTTy(*getMRI());
  auto Mask = buildConstant(
      ResTy, APInt::getLowBitsSet(ResTy.getScalarSizeInBits(), ImmOp));
  return buildAnd(Res, Op, Mask);
}

MachineInstrBuilder MachineIRBuilder::buildCast(const DstOp &Dst,
                                                const SrcOp &Src) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());
  if (SrcTy == DstTy)
    return buildCopy(Dst, Src);

  unsigned Opcode;
  if (SrcTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_PTRTOINT;
  else if (DstTy.isPointerOrPointerVector())
    Opcode = TargetOpcode::G_INTTOPTR;
  else {
    assert(!SrcTy.isPointerOrPointerVector() &&
           !DstTy.isPointerOrPointerVector() && "no G_ADDRCAST yet");
    Opcode = TargetOpcode::G_BITCAST;
  }

  return buildInstr(Opcode, Dst, Src);
}

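// Example (illustrative sketch; the *Val names are hypothetical): the cast
// opcode is inferred from the operand kinds:
//   B.buildCast(LLT::scalar(64), PtrVal);        // G_PTRTOINT
//   B.buildCast(LLT::pointer(0, 64), IntVal);    // G_INTTOPTR
//   B.buildCast(LLT::scalar(32), V2S16Val);      // G_BITCAST (same size)
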
MachineInstrBuilder MachineIRBuilder::buildExtract(const DstOp &Dst,
                                                   const SrcOp &Src,
                                                   uint64_t Index) {
  LLT SrcTy = Src.getLLTTy(*getMRI());
  LLT DstTy = Dst.getLLTTy(*getMRI());

#ifndef NDEBUG
  assert(SrcTy.isValid() && "invalid operand type");
  assert(DstTy.isValid() && "invalid operand type");
  assert(Index + DstTy.getSizeInBits() <= SrcTy.getSizeInBits() &&
         "extracting off end of register");
#endif

  if (DstTy.getSizeInBits() == SrcTy.getSizeInBits()) {
    assert(Index == 0 && "extraction past the end of a register");
    return buildCast(Dst, Src);
  }

  auto Extract = buildInstr(TargetOpcode::G_EXTRACT);
  Dst.addDefToMIB(*getMRI(), Extract);
  Src.addSrcToMIB(Extract);
  Extract.addImm(Index);
  return Extract;
}

MachineInstrBuilder MachineIRBuilder::buildUndef(const DstOp &Res) {
  return buildInstr(TargetOpcode::G_IMPLICIT_DEF, {Res}, {});
}

MachineInstrBuilder MachineIRBuilder::buildMergeValues(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_MERGE_VALUES, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  assert(TmpVec.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, TmpVec), Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildMergeLikeInstr(const DstOp &Res,
                                      std::initializer_list<SrcOp> Ops) {
  assert(Ops.size() > 1);
  return buildInstr(getOpcodeForMerge(Res, Ops), Res, Ops);
}

unsigned MachineIRBuilder::getOpcodeForMerge(const DstOp &DstOp,
                                             ArrayRef<SrcOp> SrcOps) const {
  if (DstOp.getLLTTy(*getMRI()).isVector()) {
    if (SrcOps[0].getLLTTy(*getMRI()).isVector())
      return TargetOpcode::G_CONCAT_VECTORS;
    return TargetOpcode::G_BUILD_VECTOR;
  }

  return TargetOpcode::G_MERGE_VALUES;
}

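// The merge-like opcode as a function of the types involved (summary):
//   scalar destination, scalar sources : G_MERGE_VALUES
//   vector destination, scalar sources : G_BUILD_VECTOR
//   vector destination, vector sources : G_CONCAT_VECTORS
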
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<LLT> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<LLT> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildUnmerge(LLT Res,
                                                   const SrcOp &Op) {
  unsigned NumReg = Op.getLLTTy(*getMRI()).getSizeInBits() / Res.getSizeInBits();
  SmallVector<DstOp, 8> TmpVec(NumReg, Res);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

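// Example (illustrative sketch; `Src64` is a hypothetical s64 value):
// splitting an s64 into two s32 pieces with the homogeneous overload:
//   auto Unmerge = B.buildUnmerge(LLT::scalar(32), Src64);
//   Register Lo = Unmerge.getReg(0);  // low half first, by convention
//   Register Hi = Unmerge.getReg(1);
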
MachineInstrBuilder MachineIRBuilder::buildUnmerge(ArrayRef<Register> Res,
                                                   const SrcOp &Op) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<DstOp>,
  // we need some temporary storage for the DstOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<DstOp, 8> TmpVec(Res);
  assert(TmpVec.size() > 1);
  return buildInstr(TargetOpcode::G_UNMERGE_VALUES, TmpVec, Op);
}

MachineInstrBuilder MachineIRBuilder::buildBuildVector(const DstOp &Res,
                                                       ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorConstant(const DstOp &Res,
                                           ArrayRef<APInt> Ops) {
  SmallVector<SrcOp> TmpVec;
  TmpVec.reserve(Ops.size());
  LLT EltTy = Res.getLLTTy(*getMRI()).getElementType();
  for (const auto &Op : Ops)
    TmpVec.push_back(buildConstant(EltTy, Op));
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildSplatBuildVector(const DstOp &Res,
                                                            const SrcOp &Src) {
  SmallVector<SrcOp, 8> TmpVec(Res.getLLTTy(*getMRI()).getNumElements(), Src);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
}

MachineInstrBuilder
MachineIRBuilder::buildBuildVectorTrunc(const DstOp &Res,
                                        ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  // If the sources are already as wide as the destination's elements, a plain
  // G_BUILD_VECTOR suffices; otherwise emit the truncating variant.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  if (TmpVec[0].getLLTTy(*getMRI()).getSizeInBits() ==
      Res.getLLTTy(*getMRI()).getElementType().getSizeInBits())
    return buildInstr(TargetOpcode::G_BUILD_VECTOR, Res, TmpVec);
  return buildInstr(TargetOpcode::G_BUILD_VECTOR_TRUNC, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleSplat(const DstOp &Res,
                                                        const SrcOp &Src) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  assert(Src.getLLTTy(*getMRI()) == DstTy.getElementType() &&
         "Expected Src to match Dst elt ty");
  auto UndefVec = buildUndef(DstTy);
  auto Zero = buildConstant(LLT::scalar(64), 0);
  auto InsElt = buildInsertVectorElement(DstTy, UndefVec, Src, Zero);
  SmallVector<int, 16> ZeroMask(DstTy.getNumElements());
  return buildShuffleVector(DstTy, InsElt, UndefVec, ZeroMask);
}

MachineInstrBuilder MachineIRBuilder::buildSplatVector(const DstOp &Res,
                                                       const SrcOp &Src) {
  assert(Src.getLLTTy(*getMRI()) == Res.getLLTTy(*getMRI()).getElementType() &&
         "Expected Src to match Dst elt ty");
  return buildInstr(TargetOpcode::G_SPLAT_VECTOR, Res, Src);
}

MachineInstrBuilder MachineIRBuilder::buildShuffleVector(const DstOp &Res,
                                                         const SrcOp &Src1,
                                                         const SrcOp &Src2,
                                                         ArrayRef<int> Mask) {
  LLT DstTy = Res.getLLTTy(*getMRI());
  LLT Src1Ty = Src1.getLLTTy(*getMRI());
  LLT Src2Ty = Src2.getLLTTy(*getMRI());
  const LLT DstElemTy = DstTy.isVector() ? DstTy.getElementType() : DstTy;
  const LLT ElemTy1 = Src1Ty.isVector() ? Src1Ty.getElementType() : Src1Ty;
  const LLT ElemTy2 = Src2Ty.isVector() ? Src2Ty.getElementType() : Src2Ty;
  assert(DstElemTy == ElemTy1 && DstElemTy == ElemTy2);
  (void)DstElemTy;
  (void)ElemTy1;
  (void)ElemTy2;
  ArrayRef<int> MaskAlloc = getMF().allocateShuffleMask(Mask);
  return buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {Res}, {Src1, Src2})
      .addShuffleMask(MaskAlloc);
}

MachineInstrBuilder
MachineIRBuilder::buildConcatVectors(const DstOp &Res, ArrayRef<Register> Ops) {
  // Unfortunately to convert from ArrayRef<Register> to ArrayRef<SrcOp>,
  // we need some temporary storage for the SrcOp objects. Here we use a
  // sufficiently large SmallVector to not go through the heap.
  SmallVector<SrcOp, 8> TmpVec(Ops);
  return buildInstr(TargetOpcode::G_CONCAT_VECTORS, Res, TmpVec);
}

MachineInstrBuilder MachineIRBuilder::buildInsert(const DstOp &Res,
                                                  const SrcOp &Src,
                                                  const SrcOp &Op,
                                                  unsigned Index) {
  assert(Index + Op.getLLTTy(*getMRI()).getSizeInBits() <=
             Res.getLLTTy(*getMRI()).getSizeInBits() &&
         "insertion past the end of a register");

  if (Res.getLLTTy(*getMRI()).getSizeInBits() ==
      Op.getLLTTy(*getMRI()).getSizeInBits()) {
    return buildCast(Res, Op);
  }

  return buildInstr(TargetOpcode::G_INSERT, Res, {Src, Op, uint64_t(Index)});
}

MachineInstrBuilder MachineIRBuilder::buildStepVector(const DstOp &Res,
                                                      unsigned Step) {
  unsigned Bitwidth = Res.getLLTTy(*getMRI()).getElementType().getSizeInBits();
  ConstantInt *CI = ConstantInt::get(getMF().getFunction().getContext(),
                                     APInt(Bitwidth, Step));
  auto StepVector = buildInstr(TargetOpcode::G_STEP_VECTOR);
  StepVector->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), StepVector);
  StepVector.addCImm(CI);
  return StepVector;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  unsigned MinElts) {
  auto IntN = IntegerType::get(getMF().getFunction().getContext(),
                               Res.getLLTTy(*getMRI()).getScalarSizeInBits());
  ConstantInt *CI = ConstantInt::get(IntN, MinElts);
  return buildVScale(Res, *CI);
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const ConstantInt &MinElts) {
  auto VScale = buildInstr(TargetOpcode::G_VSCALE);
  VScale->setDebugLoc(DebugLoc());
  Res.addDefToMIB(*getMRI(), VScale);
  VScale.addCImm(&MinElts);
  return VScale;
}

MachineInstrBuilder MachineIRBuilder::buildVScale(const DstOp &Res,
                                                  const APInt &MinElts) {
  ConstantInt *CI =
      ConstantInt::get(getMF().getFunction().getContext(), MinElts);
  return buildVScale(Res, *CI);
}

static unsigned getIntrinsicOpcode(bool HasSideEffects, bool IsConvergent) {
  if (HasSideEffects && IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT_W_SIDE_EFFECTS;
  if (HasSideEffects)
    return TargetOpcode::G_INTRINSIC_W_SIDE_EFFECTS;
  if (IsConvergent)
    return TargetOpcode::G_INTRINSIC_CONVERGENT;
  return TargetOpcode::G_INTRINSIC;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs,
                                 bool HasSideEffects, bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (unsigned ResultReg : ResultRegs)
    MIB.addDef(ResultReg);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                 ArrayRef<Register> ResultRegs) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, ResultRegs, HasSideEffects, isConvergent);
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results,
                                                     bool HasSideEffects,
                                                     bool isConvergent) {
  auto MIB = buildInstr(getIntrinsicOpcode(HasSideEffects, isConvergent));
  for (DstOp Result : Results)
    Result.addDefToMIB(*getMRI(), MIB);
  MIB.addIntrinsicID(ID);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildIntrinsic(Intrinsic::ID ID,
                                                     ArrayRef<DstOp> Results) {
  auto Attrs = Intrinsic::getAttributes(getContext(), ID);
  bool HasSideEffects = !Attrs.getMemoryEffects().doesNotAccessMemory();
  bool isConvergent = Attrs.hasFnAttr(Attribute::Convergent);
  return buildIntrinsic(ID, Results, HasSideEffects, isConvergent);
}

MachineInstrBuilder
MachineIRBuilder::buildTrunc(const DstOp &Res, const SrcOp &Op,
                             std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_TRUNC, Res, Op, Flags);
}

MachineInstrBuilder
MachineIRBuilder::buildFPTrunc(const DstOp &Res, const SrcOp &Op,
                               std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_FPTRUNC, Res, Op, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildICmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {
  return buildInstr(TargetOpcode::G_ICMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildFCmp(CmpInst::Predicate Pred,
                                                const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1,
                                                std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_FCMP, Res, {Pred, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildSCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_SCMP, Res, {Op0, Op1});
}

MachineInstrBuilder MachineIRBuilder::buildUCmp(const DstOp &Res,
                                                const SrcOp &Op0,
                                                const SrcOp &Op1) {
  return buildInstr(TargetOpcode::G_UCMP, Res, {Op0, Op1});
}

MachineInstrBuilder
MachineIRBuilder::buildSelect(const DstOp &Res, const SrcOp &Tst,
                              const SrcOp &Op0, const SrcOp &Op1,
                              std::optional<unsigned> Flags) {

  return buildInstr(TargetOpcode::G_SELECT, {Res}, {Tst, Op0, Op1}, Flags);
}

MachineInstrBuilder MachineIRBuilder::buildInsertSubvector(const DstOp &Res,
                                                           const SrcOp &Src0,
                                                           const SrcOp &Src1,
                                                           unsigned Idx) {
  return buildInstr(TargetOpcode::G_INSERT_SUBVECTOR, Res,
                    {Src0, Src1, uint64_t(Idx)});
}

MachineInstrBuilder MachineIRBuilder::buildExtractSubvector(const DstOp &Res,
                                                            const SrcOp &Src,
                                                            unsigned Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_SUBVECTOR, Res,
                    {Src, uint64_t(Idx)});
}

MachineInstrBuilder
MachineIRBuilder::buildInsertVectorElement(const DstOp &Res, const SrcOp &Val,
                                           const SrcOp &Elt, const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_INSERT_VECTOR_ELT, Res, {Val, Elt, Idx});
}

MachineInstrBuilder
MachineIRBuilder::buildExtractVectorElement(const DstOp &Res, const SrcOp &Val,
                                            const SrcOp &Idx) {
  return buildInstr(TargetOpcode::G_EXTRACT_VECTOR_ELT, Res, {Val, Idx});
}

MachineInstrBuilder MachineIRBuilder::buildAtomicCmpXchgWithSuccess(
    const DstOp &OldValRes, const DstOp &SuccessRes, const SrcOp &Addr,
    const SrcOp &CmpVal, const SrcOp &NewVal, MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT SuccessResTy = SuccessRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(SuccessResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG_WITH_SUCCESS);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  SuccessRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicCmpXchg(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &CmpVal, const SrcOp &NewVal,
                                     MachineMemOperand &MMO) {
#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT CmpValTy = CmpVal.getLLTTy(*getMRI());
  LLT NewValTy = NewVal.getLLTTy(*getMRI());
  assert(OldValResTy.isScalar() && "invalid operand type");
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(CmpValTy.isValid() && "invalid operand type");
  assert(NewValTy.isValid() && "invalid operand type");
  assert(OldValResTy == CmpValTy && "type mismatch");
  assert(OldValResTy == NewValTy && "type mismatch");
#endif

  auto MIB = buildInstr(TargetOpcode::G_ATOMIC_CMPXCHG);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  CmpVal.addSrcToMIB(MIB);
  NewVal.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMW(
    unsigned Opcode, const DstOp &OldValRes,
    const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {

#ifndef NDEBUG
  LLT OldValResTy = OldValRes.getLLTTy(*getMRI());
  LLT AddrTy = Addr.getLLTTy(*getMRI());
  LLT ValTy = Val.getLLTTy(*getMRI());
  assert(AddrTy.isPointer() && "invalid operand type");
  assert(ValTy.isValid() && "invalid operand type");
  assert(OldValResTy == ValTy && "type mismatch");
  assert(MMO.isAtomic() && "not atomic mem operand");
#endif

  auto MIB = buildInstr(Opcode);
  OldValRes.addDefToMIB(*getMRI(), MIB);
  Addr.addSrcToMIB(MIB);
  Val.addSrcToMIB(MIB);
  MIB.addMemOperand(&MMO);
  return MIB;
}

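// Example (illustrative sketch; registers and MMO are hypothetical): the
// buildAtomicRMW* wrappers below all funnel into buildAtomicRMW:
//   auto Old = B.buildAtomicRMWAdd(OldReg, AddrReg, OneReg, MMO);
//   // %old = G_ATOMICRMW_ADD %addr(p0), %one :: (load store seq_cst ...)
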
MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXchg(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XCHG, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAdd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_ADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWSub(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_SUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWAnd(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_AND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWNand(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_NAND, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder MachineIRBuilder::buildAtomicRMWOr(Register OldValRes,
                                                       Register Addr,
                                                       Register Val,
                                                       MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_OR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWXor(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_XOR, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMax(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWMin(Register OldValRes, Register Addr,
                                    Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_MIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmax(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWUmin(Register OldValRes, Register Addr,
                                     Register Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_UMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFAdd(
    const DstOp &OldValRes, const SrcOp &Addr, const SrcOp &Val,
    MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FADD, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFSub(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FSUB, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMax(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMAX, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildAtomicRMWFMin(const DstOp &OldValRes, const SrcOp &Addr,
                                     const SrcOp &Val, MachineMemOperand &MMO) {
  return buildAtomicRMW(TargetOpcode::G_ATOMICRMW_FMIN, OldValRes, Addr, Val,
                        MMO);
}

MachineInstrBuilder
MachineIRBuilder::buildFence(unsigned Ordering, unsigned Scope) {
  return buildInstr(TargetOpcode::G_FENCE)
      .addImm(Ordering)
      .addImm(Scope);
}

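// Example (illustrative sketch): the immediates are an AtomicOrdering value
// and a SyncScope::ID, e.g. a sequentially consistent system-wide fence:
//   B.buildFence(static_cast<unsigned>(AtomicOrdering::SequentiallyConsistent),
//                SyncScope::System);
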
MachineInstrBuilder MachineIRBuilder::buildPrefetch(const SrcOp &Addr,
                                                    unsigned RW,
                                                    unsigned Locality,
                                                    unsigned CacheType,
                                                    MachineMemOperand &MMO) {
  auto MIB = buildInstr(TargetOpcode::G_PREFETCH);
  Addr.addSrcToMIB(MIB);
  MIB.addImm(RW).addImm(Locality).addImm(CacheType);
  MIB.addMemOperand(&MMO);
  return MIB;
}

MachineInstrBuilder
MachineIRBuilder::buildBlockAddress(Register Res, const BlockAddress *BA) {
  assert(getMRI()->getType(Res).isPointer() && "invalid res type");

  return buildInstr(TargetOpcode::G_BLOCK_ADDR).addDef(Res).addBlockAddress(BA);
}

void MachineIRBuilder::validateTruncExt(const LLT DstTy, const LLT SrcTy,
                                        bool IsExtend) {
#ifndef NDEBUG
  if (DstTy.isVector()) {
    assert(SrcTy.isVector() && "mismatched cast between vector and non-vector");
    assert(SrcTy.getElementCount() == DstTy.getElementCount() &&
           "different number of elements in a trunc/ext");
  } else
    assert(DstTy.isScalar() && SrcTy.isScalar() && "invalid extend/trunc");

  if (IsExtend)
    assert(TypeSize::isKnownGT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid narrowing extend");
  else
    assert(TypeSize::isKnownLT(DstTy.getSizeInBits(), SrcTy.getSizeInBits()) &&
           "invalid widening trunc");
#endif
}

void MachineIRBuilder::validateSelectOp(const LLT ResTy, const LLT TstTy,
                                        const LLT Op0Ty, const LLT Op1Ty) {
#ifndef NDEBUG
  assert((ResTy.isScalar() || ResTy.isVector() || ResTy.isPointer()) &&
         "invalid operand type");
  assert((ResTy == Op0Ty && ResTy == Op1Ty) && "type mismatch");
  if (ResTy.isScalar() || ResTy.isPointer())
    assert(TstTy.isScalar() && "type mismatch");
  else
    assert((TstTy.isScalar() ||
            (TstTy.isVector() &&
             TstTy.getElementCount() == Op0Ty.getElementCount())) &&
           "type mismatch");
#endif
}

MachineInstrBuilder
MachineIRBuilder::buildInstr(unsigned Opc, ArrayRef<DstOp> DstOps,
                             ArrayRef<SrcOp> SrcOps,
                             std::optional<unsigned> Flags) {
  switch (Opc) {
  default:
    break;
  case TargetOpcode::G_SELECT: {
    assert(DstOps.size() == 1 && "Invalid select");
    assert(SrcOps.size() == 3 && "Invalid select");
    validateSelectOp(
        DstOps[0].getLLTTy(*getMRI()), SrcOps[0].getLLTTy(*getMRI()),
        SrcOps[1].getLLTTy(*getMRI()), SrcOps[2].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_FNEG:
  case TargetOpcode::G_ABS:
    // All these are unary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateUnaryOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()));
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_AND:
  case TargetOpcode::G_MUL:
  case TargetOpcode::G_OR:
  case TargetOpcode::G_SUB:
  case TargetOpcode::G_XOR:
  case TargetOpcode::G_UDIV:
  case TargetOpcode::G_SDIV:
  case TargetOpcode::G_UREM:
  case TargetOpcode::G_SREM:
  case TargetOpcode::G_SMIN:
  case TargetOpcode::G_SMAX:
  case TargetOpcode::G_UMIN:
  case TargetOpcode::G_UMAX:
  case TargetOpcode::G_UADDSAT:
  case TargetOpcode::G_SADDSAT:
  case TargetOpcode::G_USUBSAT:
  case TargetOpcode::G_SSUBSAT: {
    // All these are binary ops.
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateBinaryOp(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()),
                     SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
  case TargetOpcode::G_USHLSAT:
  case TargetOpcode::G_SSHLSAT: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 2 && "Invalid Srcs");
    validateShiftOp(DstOps[0].getLLTTy(*getMRI()),
                    SrcOps[0].getLLTTy(*getMRI()),
                    SrcOps[1].getLLTTy(*getMRI()));
    break;
  }
  case TargetOpcode::G_SEXT:
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), true);
    break;
  case TargetOpcode::G_TRUNC:
  case TargetOpcode::G_FPTRUNC: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    validateTruncExt(DstOps[0].getLLTTy(*getMRI()),
                     SrcOps[0].getLLTTy(*getMRI()), false);
    break;
  }
  case TargetOpcode::G_BITCAST: {
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(SrcOps.size() == 1 && "Invalid Srcs");
    assert(DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
           SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() && "invalid bitcast");
    break;
  }
  case TargetOpcode::COPY:
    assert(DstOps.size() == 1 && "Invalid Dst");
    // If the caller wants to add a subreg source it has to be done separately
    // so we may not have any SrcOps at this point yet.
    break;
  case TargetOpcode::G_FCMP:
  case TargetOpcode::G_ICMP: {
    assert(DstOps.size() == 1 && "Invalid Dst Operands");
    assert(SrcOps.size() == 3 && "Invalid Src Operands");
    // For F/ICMP, the first src operand is the predicate, followed by
    // the two comparands.
    assert(SrcOps[0].getSrcOpKind() == SrcOp::SrcType::Ty_Predicate &&
           "Expecting predicate");
    assert([&]() -> bool {
      CmpInst::Predicate Pred = SrcOps[0].getPredicate();
      return Opc == TargetOpcode::G_ICMP ? CmpInst::isIntPredicate(Pred)
                                         : CmpInst::isFPPredicate(Pred);
    }() && "Invalid predicate");
    assert(SrcOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert([&]() -> bool {
      LLT Op0Ty = SrcOps[1].getLLTTy(*getMRI());
      LLT DstTy = DstOps[0].getLLTTy(*getMRI());
      if (Op0Ty.isScalar() || Op0Ty.isPointer())
        return DstTy.isScalar();
      return DstTy.isVector() &&
             DstTy.getElementCount() == Op0Ty.getElementCount();
    }() && "Type Mismatch");
    break;
  }
  case TargetOpcode::G_UNMERGE_VALUES: {
    assert(!DstOps.empty() && "Invalid trivial sequence");
    assert(SrcOps.size() == 1 && "Invalid src for Unmerge");
    assert(llvm::all_of(DstOps,
                        [&, this](const DstOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 DstOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in output list");
    assert((TypeSize::ScalarTy)DstOps.size() *
                   DstOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    break;
  }
  case TargetOpcode::G_MERGE_VALUES: {
    assert(SrcOps.size() >= 2 && "invalid trivial sequence");
    assert(DstOps.size() == 1 && "Invalid Dst");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input operands do not cover output register");
    assert(!DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "vectors should be built with G_CONCAT_VECTOR or G_BUILD_VECTOR");
    break;
  }
  case TargetOpcode::G_EXTRACT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid Dst size");
    assert(SrcOps.size() == 2 && "Invalid Src size");
    assert(SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert((DstOps[0].getLLTTy(*getMRI()).isScalar() ||
            DstOps[0].getLLTTy(*getMRI()).isPointer()) &&
           "Invalid operand type");
    assert(SrcOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand type");
    assert(SrcOps[0].getLLTTy(*getMRI()).getElementType() ==
               DstOps[0].getLLTTy(*getMRI()) &&
           "Type mismatch");
    break;
  }
  case TargetOpcode::G_INSERT_VECTOR_ELT: {
    assert(DstOps.size() == 1 && "Invalid dst size");
    assert(SrcOps.size() == 3 && "Invalid src size");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           SrcOps[0].getLLTTy(*getMRI()).isVector() && "Invalid operand type");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementType() ==
               SrcOps[1].getLLTTy(*getMRI()) &&
           "Type mismatch");
    assert(SrcOps[2].getLLTTy(*getMRI()).isScalar() && "Invalid index");
    assert(DstOps[0].getLLTTy(*getMRI()).getElementCount() ==
               SrcOps[0].getLLTTy(*getMRI()).getElementCount() &&
           "Type mismatch");
    break;
  }
: {
1387 assert((!SrcOps
.empty() || SrcOps
.size() < 2) &&
1388 "Must have at least 2 operands");
1389 assert(DstOps
.size() == 1 && "Invalid DstOps");
1390 assert(DstOps
[0].getLLTTy(*getMRI()).isVector() &&
1391 "Res type must be a vector");
1392 assert(llvm::all_of(SrcOps
,
1393 [&, this](const SrcOp
&Op
) {
1394 return Op
.getLLTTy(*getMRI()) ==
1395 SrcOps
[0].getLLTTy(*getMRI());
1397 "type mismatch in input list");
1398 assert((TypeSize::ScalarTy
)SrcOps
.size() *
1399 SrcOps
[0].getLLTTy(*getMRI()).getSizeInBits() ==
1400 DstOps
[0].getLLTTy(*getMRI()).getSizeInBits() &&
1401 "input scalars do not exactly cover the output vector register");
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    assert(SrcOps.size() >= 2 &&
           "Must have at least 2 operands");
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(DstOps[0].getLLTTy(*getMRI()).isVector() &&
           "Res type must be a vector");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return Op.getLLTTy(*getMRI()) ==
                                 SrcOps[0].getLLTTy(*getMRI());
                        }) &&
           "type mismatch in input list");
    break;
  }
  case TargetOpcode::G_CONCAT_VECTORS: {
    assert(DstOps.size() == 1 && "Invalid DstOps");
    assert(SrcOps.size() >= 2 &&
           "Must have at least 2 operands");
    assert(llvm::all_of(SrcOps,
                        [&, this](const SrcOp &Op) {
                          return (Op.getLLTTy(*getMRI()).isVector() &&
                                  Op.getLLTTy(*getMRI()) ==
                                      SrcOps[0].getLLTTy(*getMRI()));
                        }) &&
           "type mismatch in input list");
    assert((TypeSize::ScalarTy)SrcOps.size() *
                   SrcOps[0].getLLTTy(*getMRI()).getSizeInBits() ==
               DstOps[0].getLLTTy(*getMRI()).getSizeInBits() &&
           "input vectors do not exactly cover the output vector register");
    break;
  }
  case TargetOpcode::G_UADDE: {
    assert(DstOps.size() == 2 && "Invalid no of dst operands");
    assert(SrcOps.size() == 3 && "Invalid no of src operands");
    assert(DstOps[0].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert((DstOps[0].getLLTTy(*getMRI()) == SrcOps[0].getLLTTy(*getMRI())) &&
           (DstOps[0].getLLTTy(*getMRI()) == SrcOps[1].getLLTTy(*getMRI())) &&
           "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()).isScalar() && "Invalid operand");
    assert(DstOps[1].getLLTTy(*getMRI()) == SrcOps[2].getLLTTy(*getMRI()) &&
           "type mismatch");
    break;
  }
  }

  auto MIB = buildInstr(Opc);
  for (const DstOp &Op : DstOps)
    Op.addDefToMIB(*getMRI(), MIB);
  for (const SrcOp &Op : SrcOps)
    Op.addSrcToMIB(MIB);
  if (Flags)
    MIB->setFlags(*Flags);
  return MIB;
}