//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the IRTranslator class.
///
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGenTypes/LowLevelType.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
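
// Example use of this flag (a sketch; assumes any GlobalISel-enabled target,
// e.g. AArch64):
//   llc -global-isel -enable-cse-in-irtranslator=1 input.ll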

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  if (!Val.getType()->isTokenTy())
    assert(Val.getType()->isSized() &&
           "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
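
// Illustration of the mapping built above: for a value of type {i64, i32},
// computeValueLLTs yields two LLTs (s64, s32) with bit offsets {0, 64}
// (assuming the usual data layout), so the value is tracked as two virtual
// registers rather than one.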

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *MBB = FuncInfo.getMBB(&BB);
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred = CI->getPredicate();
  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(*CI);
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1, Flags);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess with the insertion point, but that is not important
  // here as a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the case block.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over nodes that are not part of the tree, and remember to invert the
  // op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it needs
  // to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //   = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
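
    // Worked example of the math above: with A = TProb = 3/4 and
    // B = FProb = 1/4, BB1 gets {A/2, A/2 + B} = {3/8, 5/8}, while TmpBB's raw
    // {A/2, B} = {3/8, 2/8} normalizes to {3/5, 2/5} = {A/(1+B), 2B/(1+B)}.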
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //   = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOptLevel::None ||
        !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI->isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}
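
// For a plain conditional branch, e.g. `br i1 %c, label %t, label %f`, the
// CaseBlock path above reuses %c's vreg (the compare-with-true special case in
// emitSwitchCase) and emits a G_BRCOND to %bb.t followed by a G_BR to %bb.f.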

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    // For optimized builds, lower large range as a balanced binary tree.
    if (NumClusters > 3 &&
        MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
      continue;
    }

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}
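
// Clustering example: a switch with cases 0, 1, 2 -> %bbA and 5 -> %bbB is
// turned by sortAndRangeify into the clusters [0..2] -> %bbA and [5] -> %bbB
// before jump table and bit test discovery run over the cluster vector.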

void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                                 const SwitchCG::SwitchWorkListItem &W,
                                 Value *Cond, MachineBasicBlock *SwitchMBB,
                                 MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
               LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
               RightProb);

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
  else
    SL->SwitchCases.push_back(CB);
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}
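
// Worked example of the header above: for cases 10..14, Sub = %val - 10 and
// the guard is `Sub ugt 4`; values below 10 wrap around to large unsigned
// numbers, so the single unsigned compare rejects both out-of-range sides.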

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}
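
// Range example for the SLE path above: Low <= x <= High is emitted as
// (x - Low) ule (High - Low), i.e. one G_SUB plus one unsigned G_ICMP instead
// of two signed comparisons.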

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}
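
// Bit test example: for the case set {1, 3, 5} handled in one block, B.Mask is
// 0b101010, and the general path above emits (1 << Reg) & Mask != 0 to decide
// whether Reg hits any case in the set.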

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  // Here, we order cases by probability so the most likely case will be
  // checked first. However, two clusters can have the same probability in
  // which case their relative ordering is non-deterministic. So we use Low
  // as a tie-breaker as clusters are guaranteed to never overlap.
  llvm::sort(W.FirstCluster, W.LastCluster + 1,
             [](const CaseCluster &a, const CaseCluster &b) {
               return a.Prob != b.Prob
                          ? a.Prob > b.Prob
                          : a.Low->getValue().slt(b.Low->getValue());
             });

  // Rearrange the case blocks so that the last one falls through if possible
  // without changing the order of probabilities.
  for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
    --I;
    if (I->Prob > W.LastCluster->Prob)
      break;
    if (I->Kind == CC_Range && I->MBB == NextMBB) {
      std::swap(*I, *W.LastCluster);
      break;
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  TypeSize StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize.isZero())
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  MachineMemOperand::Flags Flags =
      TLI->getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
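
// Split-load example: a load of {i64, i32} goes through the loop above twice,
// emitting a G_LOAD at byte offset 0 (s64) and another at byte offset 8 (s32),
// since Offsets holds bit offsets {0, 64} (assuming the usual data layout).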

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()).isZero())
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  MachineMemOperand::Flags Flags = TLI->getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (Value *Op : drop_begin(U.operands()))
      Indices.push_back(Op);
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}
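
// Example: for `extractvalue {i64, i32} %agg, 1`, Offset is 64 bits, so the
// lower_bound search above lands on %agg's second vreg, which is simply reused
// for the result; no instructions are emitted.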

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint32_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // If the source is a ConstantInt then it was probably created by
    // ConstantHoisting and we should leave it alone.
    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
                           MIRBuilder);
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  }

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  if (U.getType()->getScalarType()->isBFloatTy() ||
      U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
    return false;

  uint32_t Flags = 0;
  if (const Instruction *I = dyn_cast<Instruction>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*I);

  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op}, Flags);
  return true;
}
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  uint32_t Flags = 0;
  if (const Instruction *I = dyn_cast<Instruction>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*I);

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;

  // True if we should use a splat vector; using VectorWidth alone is not
  // sufficient.
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // We don't produce 1 x N vectors; those are treated as scalars.
    WantSplatVector = VectorWidth > 1;
  }

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg = MIRBuilder
                  .buildSplatBuildVector(LLT::fixed_vector(VectorWidth, PtrTy),
                                         BaseReg)
                  .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIndexType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;
          continue;
        }
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
          IdxReg = MIRBuilder
                       .buildSplatBuildVector(OffsetTy.changeElementType(IdxTy),
                                              IdxReg)
                       .getReg(0);
        }

        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(OffsetTy, Offset);

    if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
      Flags |= MachineInstr::MIFlag::NoUWrap;

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           Flags);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
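
// Illustrative sketch (editorial addition, not from the upstream source): for
// a simple scaled-index GEP such as
//   %p = getelementptr i32, ptr %base, i64 %i
// the loop above emits, roughly:
//   %four:_(s64) = G_CONSTANT i64 4   ; allocation stride of i32
//   %off:_(s64)  = G_MUL %i, %four
//   %p:_(p0)     = G_PTR_ADD %base, %off
// Constant indices are instead folded into the accumulated Offset and emitted
// as a single trailing G_PTR_ADD.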

bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {
  const Value *SrcPtr = CI.getArgOperand(1);
  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(SrcPtr))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);

  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();

  ConstantInt *CopySize = nullptr;

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume later that we cannot tail call
    // any memory intrinsics.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
  if (IsVol) {
    LoadFlags |= MachineMemOperand::MOVolatile;
    StoreFlags |= MachineMemOperand::MOVolatile;
  }

  AAMDNodes AAInfo = CI.getAAMetadata();
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
          SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
    LoadFlags |= MachineMemOperand::MOInvariant;

    // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
    // but the previous usage implied it did. Probably should check
    // isDereferenceableAndAlignedPointer.
    LoadFlags |= MachineMemOperand::MODereferenceable;
  }

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

  return true;
}
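
// Illustrative sketch (editorial addition, not from the upstream source): a
// call such as
//   call void @llvm.memcpy.p0.p0.i64(ptr %d, ptr %s, i64 %n, i1 false)
// becomes, roughly,
//   G_MEMCPY %d(p0), %s(p0), %n(s64), 0
// with the store/load memory operands attached as above; the trailing
// immediate is the propagated tail-call flag.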

bool IRTranslator::translateTrap(const CallInst &CI,
                                 MachineIRBuilder &MIRBuilder,
                                 unsigned Opcode) {
  StringRef TrapFuncName =
      CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
  if (TrapFuncName.empty()) {
    if (Opcode == TargetOpcode::G_UBSANTRAP) {
      uint64_t Code = cast<ConstantInt>(CI.getOperand(0))->getZExtValue();
      MIRBuilder.buildInstr(Opcode, {}, ArrayRef<llvm::SrcOp>{Code});
    } else {
      MIRBuilder.buildInstr(Opcode);
    }
    return true;
  }

  CallLowering::CallLoweringInfo Info;
  if (Opcode == TargetOpcode::G_UBSANTRAP)
    Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
                             CI.getArgOperand(0)->getType(), 0});

  Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
  Info.CB = &CI;
  Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
  return CLI->lowerCall(MIRBuilder, Info);
}

bool IRTranslator::translateVectorInterleave2Intrinsic(
    const CallInst &CI, MachineIRBuilder &MIRBuilder) {
  assert(CI.getIntrinsicID() == Intrinsic::vector_interleave2 &&
         "This function can only be called on the interleave2 intrinsic!");
  // Canonicalize interleave2 to G_SHUFFLE_VECTOR (similar to SelectionDAG).
  Register Op0 = getOrCreateVReg(*CI.getOperand(0));
  Register Op1 = getOrCreateVReg(*CI.getOperand(1));
  Register Res = getOrCreateVReg(CI);

  LLT OpTy = MRI->getType(Op0);
  MIRBuilder.buildShuffleVector(Res, Op0, Op1,
                                createInterleaveMask(OpTy.getNumElements(), 2));

  return true;
}
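
// Illustrative sketch (editorial addition): for two <4 x i32> operands,
// createInterleaveMask(4, 2) is <0, 4, 1, 5, 2, 6, 3, 7>, so the
// G_SHUFFLE_VECTOR alternates lanes from Op0 and Op1.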

bool IRTranslator::translateVectorDeinterleave2Intrinsic(
    const CallInst &CI, MachineIRBuilder &MIRBuilder) {
  assert(CI.getIntrinsicID() == Intrinsic::vector_deinterleave2 &&
         "This function can only be called on the deinterleave2 intrinsic!");
  // Canonicalize deinterleave2 to shuffles that extract sub-vectors (similar to
  // SelectionDAG).
  Register Op = getOrCreateVReg(*CI.getOperand(0));
  auto Undef = MIRBuilder.buildUndef(MRI->getType(Op));
  ArrayRef<Register> Res = getOrCreateVRegs(CI);

  LLT ResTy = MRI->getType(Res[0]);
  MIRBuilder.buildShuffleVector(Res[0], Op, Undef,
                                createStrideMask(0, 2, ResTy.getNumElements()));
  MIRBuilder.buildShuffleVector(Res[1], Op, Undef,
                                createStrideMask(1, 2, ResTy.getNumElements()));

  return true;
}
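
// Illustrative sketch (editorial addition): for an <8 x i32> operand the two
// stride masks are <0, 2, 4, 6> and <1, 3, 5, 7>, extracting the even and odd
// lanes into the two results.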

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  Value *Global = TLI->getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}

bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  return true;
}
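
// Illustrative sketch (editorial addition): a call such as
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 31)
// becomes a G_SMULFIX with %a and %b as register operands and the scale (31)
// attached as an immediate operand.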

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::acos:
    return TargetOpcode::G_FACOS;
  case Intrinsic::asin:
    return TargetOpcode::G_FASIN;
  case Intrinsic::atan:
    return TargetOpcode::G_FATAN;
  case Intrinsic::atan2:
    return TargetOpcode::G_FATAN2;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::cosh:
    return TargetOpcode::G_FCOSH;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sinh:
    return TargetOpcode::G_FSINH;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::tan:
    return TargetOpcode::G_FTAN;
  case Intrinsic::tanh:
    return TargetOpcode::G_FTANH;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::readsteadycounter:
    return TargetOpcode::G_READSTEADYCOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  case Intrinsic::llrint:
    return TargetOpcode::G_INTRINSIC_LLRINT;
  // FADD/FMUL require checking the FMF, so are handled elsewhere.
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::experimental_vector_compress:
    return TargetOpcode::G_VECTOR_COMPRESS;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {
  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (const auto &Arg : CI.args())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}
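
// Illustrative sketch (editorial addition): with the mapping above,
//   %r = call float @llvm.sqrt.f32(float %x)
// translates directly to
//   %r:_(s32) = G_FSQRT %x
// with fast-math flags copied from the call site.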

// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
  default:
    return 0;
  }
}

bool IRTranslator::translateConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  for (unsigned I = 0, E = FPI.getNonMetadataArgCount(); I != E; ++I)
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(I)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}

std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  // Arguments are lowered as a copy of a livein physical register.
  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();
}
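
// Illustrative sketch (editorial addition): on a target where the first
// integer argument arrives in a physical register, the argument lowers as,
// e.g., "%0:_(s64) = COPY $x0" (AArch64), and this helper returns $x0.
// Arguments split across several vregs yield std::nullopt.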

bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
                                                 const DILocalVariable *Var,
                                                 const DIExpression *Expr,
                                                 const DebugLoc &DL,
                                                 MachineIRBuilder &MIRBuilder) {
  auto *Arg = dyn_cast<Argument>(Val);
  if (!Arg)
    return false;

  if (!Expr->isEntryValue())
    return false;

  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
  if (!PhysReg) {
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");
    LLVM_DEBUG(dbgs() << *Var << "\n");
    return true;
  }

  if (isDeclare) {
    // Append an op deref to account for the fact that this is a dbg_declare.
    Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
    MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
  } else {
    MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
  }

  return true;
}

static unsigned getConvOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    llvm_unreachable("Unexpected intrinsic");
  case Intrinsic::experimental_convergence_anchor:
    return TargetOpcode::CONVERGENCECTRL_ANCHOR;
  case Intrinsic::experimental_convergence_entry:
    return TargetOpcode::CONVERGENCECTRL_ENTRY;
  case Intrinsic::experimental_convergence_loop:
    return TargetOpcode::CONVERGENCECTRL_LOOP;
  }
}

bool IRTranslator::translateConvergenceControlIntrinsic(
    const CallInst &CI, Intrinsic::ID ID, MachineIRBuilder &MIRBuilder) {
  MachineInstrBuilder MIB = MIRBuilder.buildInstr(getConvOpcode(ID));
  Register OutputReg = getOrCreateConvergenceTokenVReg(CI);
  MIB.addDef(OutputReg);

  if (ID == Intrinsic::experimental_convergence_loop) {
    auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl);
    assert(Bundle && "Expected a convergence control token.");
    Register InputReg =
        getOrCreateConvergenceTokenVReg(*Bundle->Inputs[0].get());
    MIB.addUse(InputReg);
  }

  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None ||
        MF->getFunction().hasOptNone())
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    getUnderlyingObjects(CI.getArgOperand(1), Allocas);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::fake_use: {
    SmallVector<llvm::SrcOp, 4> VRegs;
    for (const auto &Arg : CI.args())
      for (auto VReg : getOrCreateVRegs(*Arg))
        VRegs.push_back(VReg);
    MIRBuilder.buildInstr(TargetOpcode::FAKE_USE, {}, VRegs);
    MF->setHasFakeUses(true);
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");
    translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(), DI.getVariable(),
                              DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI->getVaListSizeInBits(*DL) / 8;
    Align Alignment = getKnownAlignment(Ptr, *DL);

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Alignment));
    return true;
  }
  case Intrinsic::dbg_assign:
    // A dbg.assign is a dbg.value with more information about stack locations,
    // typically produced during optimisation of variables with leaked
    // addresses. We can treat it like a normal dbg_value intrinsic here; to
    // benefit from the full analysis of stack/SSA locations, GlobalISel would
    // need to register for and use the AssignmentTrackingAnalysis pass.
    [[fallthrough]];
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
                            DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
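  // Illustrative sketch (editorial addition): the overflow cases above map a
  // struct-returning call such as
  //   %s = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
  // to a single two-result instruction:
  //   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b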
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // TODO: Preserve "int min is poison" arg in GMIR?
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI->isFMAFasterThanFMulAndFAdd(*MF,
                                        TLI->getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPExt(getOrCreateVReg(CI),
                          getOrCreateVReg(*CI.getArgOperand(0)),
                          MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::frexp: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
                           getOrCreateVReg(*CI.getArgOperand(0)),
                           MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::sincos: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildFSincos(VRegs[0], VRegs[1],
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::fptosi_sat:
    MIRBuilder.buildFPTOSI_SAT(getOrCreateVReg(CI),
                               getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::fptoui_sat:
    MIRBuilder.buildFPTOUI_SAT(getOrCreateVReg(CI),
                               getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal;
    if (TLI->useLoadStackGuardNode(*CI.getModule())) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    } else
      GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
    return true;
  }
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::expect_with_probability:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
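  // Illustrative sketch (editorial addition): a read such as
  //   %v = call i64 @llvm.read_register.i64(metadata !0)   ; !0 = !{!"sp"}
  // becomes a G_READ_REGISTER whose register is named by the attached
  // metadata node rather than by an MIR register operand.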
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getContext().getOrCreateFrameAllocSymbol(EscapedName, Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
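  // Illustrative sketch (editorial addition): with the reassoc flag,
  //   %r = call reassoc float @llvm.vector.reduce.fadd.v4f32(float %acc,
  //                                                          <4 x float> %v)
  // splits into
  //   %rdx:_(s32) = reassoc G_VECREDUCE_FADD %v
  //   %r:_(s32)   = reassoc G_FADD %acc, %rdx
  // while without it a single sequential G_VECREDUCE_SEQ_FADD is emitted.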
  case Intrinsic::trap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_TRAP);
  case Intrinsic::debugtrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_DEBUGTRAP);
  case Intrinsic::ubsantrap:
    return translateTrap(CI, MIRBuilder, TargetOpcode::G_UBSANTRAP);
  case Intrinsic::allow_runtime_check:
  case Intrinsic::allow_ubsan_check:
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*ConstantInt::getTrue(CI.getType())));
    return true;
  case Intrinsic::amdgcn_cs_chain:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);

    // Convert the metadata argument to a constant integer
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    // Add the Rounding mode as an integer
    MIRBuilder
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
        .addImm((int)*RoundMode);

    return true;
  }
  case Intrinsic::is_fpclass: {
    Value *FpValue = CI.getOperand(0);
    ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));

    MIRBuilder
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
        .addImm(TestMaskValue->getZExtValue());

    return true;
  }
  case Intrinsic::set_fpenv: {
    Value *FPEnv = CI.getOperand(0);
    MIRBuilder.buildSetFPEnv(getOrCreateVReg(*FPEnv));
    return true;
  }
  case Intrinsic::reset_fpenv:
    MIRBuilder.buildResetFPEnv();
    return true;
  case Intrinsic::set_fpmode: {
    Value *FPState = CI.getOperand(0);
    MIRBuilder.buildSetFPMode(getOrCreateVReg(*FPState));
    return true;
  }
  case Intrinsic::reset_fpmode:
    MIRBuilder.buildResetFPMode();
    return true;
  case Intrinsic::vscale: {
    MIRBuilder.buildVScale(getOrCreateVReg(CI), 1);
    return true;
  }
  case Intrinsic::scmp:
    MIRBuilder.buildSCmp(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getOperand(0)),
                         getOrCreateVReg(*CI.getOperand(1)));
    return true;
  case Intrinsic::ucmp:
    MIRBuilder.buildUCmp(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getOperand(0)),
                         getOrCreateVReg(*CI.getOperand(1)));
    return true;
  case Intrinsic::vector_extract:
    return translateExtractVector(CI, MIRBuilder);
  case Intrinsic::vector_insert:
    return translateInsertVector(CI, MIRBuilder);
  case Intrinsic::stepvector: {
    MIRBuilder.buildStepVector(getOrCreateVReg(CI), 1);
    return true;
  }
  case Intrinsic::prefetch: {
    Value *Addr = CI.getOperand(0);
    unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
    unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
    unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();

    auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
                                          LLT(), Align());

    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
                             MMO);

    return true;
  }
  case Intrinsic::vector_interleave2:
  case Intrinsic::vector_deinterleave2: {
    // Both intrinsics have at least one operand.
    Value *Op0 = CI.getOperand(0);
    LLT ResTy = getLLTForType(*Op0->getType(), MIRBuilder.getDataLayout());
    if (!ResTy.isFixedVector())
      return false;

    if (CI.getIntrinsicID() == Intrinsic::vector_interleave2)
      return translateVectorInterleave2Intrinsic(CI, MIRBuilder);

    return translateVectorDeinterleave2Intrinsic(CI, MIRBuilder);
  }

#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  case Intrinsic::experimental_convergence_anchor:
  case Intrinsic::experimental_convergence_entry:
  case Intrinsic::experimental_convergence_loop:
    return translateConvergenceControlIntrinsic(CI, ID, MIRBuilder);
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}

bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (const auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(ArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(CI);
      }
    }
  }

  std::optional<CallLowering::PtrAuthInfo> PAI;
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_ptrauth)) {
    // Functions should never be ptrauth-called directly.
    assert(!CB.getCalledFunction() && "invalid direct ptrauth call");

    const Value *Key = Bundle->Inputs[0];
    const Value *Discriminator = Bundle->Inputs[1];

    // Look through ptrauth constants to try to eliminate the matching bundle
    // and turn this into a direct call with no ptrauth.
    // CallLowering will use the raw pointer if it doesn't find the PAI.
    const auto *CalleeCPA = dyn_cast<ConstantPtrAuth>(CB.getCalledOperand());
    if (!CalleeCPA || !isa<Function>(CalleeCPA->getPointer()) ||
        !CalleeCPA->isKnownCompatibleWith(Key, Discriminator, *DL)) {
      // If we can't make it direct, package the bundle into PAI.
      Register DiscReg = getOrCreateVReg(*Discriminator);
      PAI = CallLowering::PtrAuthInfo{cast<ConstantInt>(Key)->getZExtValue(),
                                      DiscReg};
    }
  }

  Register ConvergenceCtrlToken = 0;
  if (auto Bundle = CB.getOperandBundle(LLVMContext::OB_convergencectrl)) {
    const auto &Token = *Bundle->Inputs[0].get();
    ConvergenceCtrlToken = getOrCreateConvergenceTokenVReg(Token);
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success = CLI->lowerCall(
      MIRBuilder, CB, Res, Args, SwiftErrorVReg, PAI, ConvergenceCtrlToken,
      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support statepoints and related.
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  diagnoseDontCall(CI);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (const auto &Arg : enumerate(CI.args())) {
    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
      if (!MDN) {
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
          MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
        else // This was probably an MDString.
          return false;
      }
      MIB.addMetadata(MDN);
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI->getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    Align Alignment = Info.align.value_or(
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    ? getLLTForMVT(Info.memVT.getSimpleVT())
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
    //       didn't yield anything useful.
    MachinePointerInfo MPI;
    if (Info.ptrVal)
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
    MIB.addMemOperand(MF->getMachineMemOperand(MPI, Info.flags, MemTy,
                                               Alignment, CI.getAAMetadata()));
  }

  if (CI.isConvergent()) {
    if (auto Bundle = CI.getOperandBundle(LLVMContext::OB_convergencectrl)) {
      auto *Token = Bundle->Inputs[0].get();
      Register TokenReg = getOrCreateVReg(*Token);
      MIB.addUse(TokenReg, RegState::Implicit);
    }
  }

  return true;
}

bool IRTranslator::findUnwindDestinations(
    const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality = classifyEHPersonality(
      EHPadBB->getParent()->getFunction().getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    // Ignore this for now.
    return false;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      break;
    }
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    }
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
  return true;
}

bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Function *Fn = I.getCalledFunction();

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.hasDeoptState())
    return false;

  // FIXME: support control flow guard targets.
  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
    return false;

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (Fn && (Fn->hasDLLImportStorageClass() ||
             (MF->getTarget().getTargetTriple().isOSWindows() &&
              Fn->hasExternalWeakLinkage())))
    return false;

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = nullptr;
  if (NeedEHLabel) {
    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
  }

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
      return false;
  } else if (!translateCallBase(I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = nullptr;
  if (NeedEHLabel) {
    EndSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
    return false;

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  // Update successor info.
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  if (NeedEHLabel) {
    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  }

  MIRBuilder.buildBr(ReturnMBB);
  return true;
}

bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}

bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI->getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI->getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // If the unwinder does not preserve all registers, ensure that the
  // function marks the clobbered registers as used.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  Register Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  Register ExceptionReg = TLI->getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI->getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}

bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Register NumElts = getOrCreateVReg(*AI.getArraySize());
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Type *Ty = AI.getAllocatedType();

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
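
// Illustrative sketch (editorial addition): the rounding above computes
//   AlignedAlloc = (AllocSize + StackAlign - 1) & ~(StackAlign - 1)
// so, e.g., a 13-byte request under a 16-byte stack alignment is padded to 16
// bytes before the G_DYN_STACKALLOC is emitted.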

bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use
  // va_arg anyway, but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}

bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
  if (!MF->getTarget().Options.TrapUnreachable)
    return true;

  auto &UI = cast<UnreachableInst>(U);

  // We may be able to ignore unreachable behind a noreturn call.
  if (const CallInst *Call = dyn_cast_or_null<CallInst>(UI.getPrevNode());
      Call && Call->doesNotReturn()) {
    if (MF->getTarget().Options.NoTrapAfterNoreturn)
      return true;
    // Do not emit an additional trap instruction.
    if (Call->isNonContinuableTrap())
      return true;
  }

  MIRBuilder.buildTrap();
  return true;
}

bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (auto *FVT = dyn_cast<FixedVectorType>(U.getType());
      FVT && FVT->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(2))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(2));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}

bool IRTranslator::translateInsertVector(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(U);
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));

  ConstantInt *CI = cast<ConstantInt>(U.getOperand(2));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();

  // Resize Index to preferred index width.
  if (CI->getBitWidth() != PreferredVecIdxWidth) {
    APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
    CI = ConstantInt::get(CI->getContext(), NewIdx);
  }

  // If it is a <1 x Ty> vector, we have to use other means.
  if (auto *ResultType = dyn_cast<FixedVectorType>(U.getOperand(1)->getType());
      ResultType && ResultType->getNumElements() == 1) {
    if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
        InputType && InputType->getNumElements() == 1) {
      // We are inserting an illegal fixed vector into an illegal
      // fixed vector, use the scalar as it is not a legal vector type
      // in LLT.
      return translateCopy(U, *U.getOperand(0), MIRBuilder);
    }
    if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
      // We are inserting an illegal fixed vector into a legal fixed
      // vector, use the scalar as it is not a legal vector type in
      // LLT.
      Register Idx = getOrCreateVReg(*CI);
      MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, Idx);
      return true;
    }
    if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
      // We are inserting an illegal fixed vector into a scalable
      // vector, use a scalar element insert.
      LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
      Register Idx = getOrCreateVReg(*CI);
      auto ScaledIndex = MIRBuilder.buildMul(
          VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      MIRBuilder.buildInsertVectorElement(Dst, Vec, Elt, ScaledIndex);
      return true;
    }
  }

  MIRBuilder.buildInsertSubvector(
      getOrCreateVReg(U), getOrCreateVReg(*U.getOperand(0)),
      getOrCreateVReg(*U.getOperand(1)), CI->getZExtValue());
  return true;
}
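
// Illustrative translation for the general case (not from the source):
//   %r = call <vscale x 4 x i32> @llvm.vector.insert.nxv4i32.v4i32(
//            <vscale x 4 x i32> %vec, <4 x i32> %sub, i64 0)
// becomes
//   %r:_(<vscale x 4 x s32>) = G_INSERT_SUBVECTOR %vec, %sub(<4 x s32>), 0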

bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (const FixedVectorType *FVT =
          dyn_cast<FixedVectorType>(U.getOperand(0)->getType()))
    if (FVT->getNumElements() == 1)
      return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
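
// For example, `%r = extractelement <1 x float> %v, i32 0` is handled by the
// early translateCopy() path above: <1 x float> is represented as a plain
// s32 in LLT, so no G_EXTRACT_VECTOR_ELT is needed.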

bool IRTranslator::translateExtractVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Register Res = getOrCreateVReg(U);
  Register Vec = getOrCreateVReg(*U.getOperand(0));
  ConstantInt *CI = cast<ConstantInt>(U.getOperand(1));
  unsigned PreferredVecIdxWidth = TLI->getVectorIdxTy(*DL).getSizeInBits();

  // Resize Index to preferred index width.
  if (CI->getBitWidth() != PreferredVecIdxWidth) {
    APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
    CI = ConstantInt::get(CI->getContext(), NewIdx);
  }

  // If it is a <1 x Ty> vector, we have to use other means.
  if (auto *ResultType = dyn_cast<FixedVectorType>(U.getType());
      ResultType && ResultType->getNumElements() == 1) {
    if (auto *InputType = dyn_cast<FixedVectorType>(U.getOperand(0)->getType());
        InputType && InputType->getNumElements() == 1) {
      // We are extracting an illegal fixed vector from an illegal fixed vector,
      // use the scalar as it is not a legal vector type in LLT.
      return translateCopy(U, *U.getOperand(0), MIRBuilder);
    }
    if (isa<FixedVectorType>(U.getOperand(0)->getType())) {
      // We are extracting an illegal fixed vector from a legal fixed
      // vector, use the scalar as it is not a legal vector type in
      // LLT.
      Register Idx = getOrCreateVReg(*CI);
      MIRBuilder.buildExtractVectorElement(Res, Vec, Idx);
      return true;
    }
    if (isa<ScalableVectorType>(U.getOperand(0)->getType())) {
      // We are extracting an illegal fixed vector from a scalable
      // vector, use a scalar element extract.
      LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
      Register Idx = getOrCreateVReg(*CI);
      auto ScaledIndex = MIRBuilder.buildMul(
          VecIdxTy, MIRBuilder.buildVScale(VecIdxTy, 1), Idx);
      MIRBuilder.buildExtractVectorElement(Res, Vec, ScaledIndex);
      return true;
    }
  }

  MIRBuilder.buildExtractSubvector(getOrCreateVReg(U),
                                   getOrCreateVReg(*U.getOperand(0)),
                                   CI->getZExtValue());
  return true;
}
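
// Illustrative translation for the general case (not from the source):
//   %r = call <2 x i64> @llvm.vector.extract.v2i64.nxv2i64(
//            <vscale x 2 x i64> %vec, i64 0)
// becomes
//   %r:_(<2 x s64>) = G_EXTRACT_SUBVECTOR %vec, 0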

bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // A ShuffleVector that operates on scalable vectors is a splat vector where
  // the value of the splat vector is the 0th element of the first operand,
  // since the index mask operand is the zeroinitializer (undef and
  // poison are treated as zeroinitializer here).
  if (U.getOperand(0)->getType()->isScalableTy()) {
    Register Val = getOrCreateVReg(*U.getOperand(0));
    auto SplatVal = MIRBuilder.buildExtractVectorElementConstant(
        MRI->getType(Val).getElementType(), Val, 0);
    MIRBuilder.buildSplatVector(getOrCreateVReg(U), SplatVal);
    return true;
  }

  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}
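
// For example, a scalable splat such as
//   shufflevector <vscale x 4 x i32> %v, <vscale x 4 x i32> poison,
//                 <vscale x 4 x i32> zeroinitializer
// becomes an extract of element 0 followed by G_SPLAT_VECTOR, while fixed
// shuffles keep their mask via G_SHUFFLE_VECTOR's shufflemask operand.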

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
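
// Only placeholder G_PHIs are created here; their (value, predecessor)
// operand pairs are filled in by finishPendingPhis() once every incoming
// value has vregs. E.g. a `phi {i32, i32}` gets one G_PHI per aggregate
// member, one for each vreg returned by getOrCreateVRegs().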

bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}

bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto Flags = TLI->getAtomicMemOperandFlags(I, *DL);

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  case AtomicRMWInst::FMax:
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    break;
  case AtomicRMWInst::FMin:
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    break;
  case AtomicRMWInst::UIncWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    break;
  case AtomicRMWInst::UDecWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    break;
  case AtomicRMWInst::USubCond:
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_COND;
    break;
  case AtomicRMWInst::USubSat:
    Opcode = TargetOpcode::G_ATOMICRMW_USUB_SAT;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
                                I.getOrdering()));
  return true;
}
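
// Illustrative translation (not from the source):
//   %old = atomicrmw add ptr %p, i32 1 monotonic
// becomes
//   %old:_(s32) = G_ATOMICRMW_ADD %p(p0), %one
// where the monotonic ordering lives on the attached MachineMemOperand.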

bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}
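
// G_FENCE carries the ordering and scope as raw immediates, e.g.
// `fence seq_cst` becomes `G_FENCE 7, 1` (illustrative encoding; the first
// operand is the AtomicOrdering value, the second the sync scope ID).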

bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }

  return true;
}
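
// Freeze is translated per part, so a `freeze {i32, i32}` value produces two
// G_FREEZE instructions, one for each vreg of the aggregate.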

void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    if (PI->getType()->isEmptyTy())
      continue;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}

void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
                                           const DILocalVariable *Variable,
                                           const DIExpression *Expression,
                                           const DebugLoc &DL,
                                           MachineIRBuilder &MIRBuilder) {
  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  // Act as if we're handling a debug intrinsic.
  MIRBuilder.setDebugLoc(DL);

  if (!V || HasArgList) {
    // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
    // terminate any prior location.
    MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
    return;
  }

  if (const auto *CI = dyn_cast<Constant>(V)) {
    MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
    return;
  }

  if (auto *AI = dyn_cast<AllocaInst>(V);
      AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
    // If the value is an alloca and the expression starts with a
    // dereference, track a stack slot instead of a register, as registers
    // may be clobbered.
    auto ExprOperands = Expression->getElements();
    auto *ExprDerefRemoved =
        DIExpression::get(AI->getContext(), ExprOperands.drop_front());
    MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
                               ExprDerefRemoved);
    return;
  }
  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
                                    MIRBuilder))
    return;
  for (Register Reg : getOrCreateVRegs(*V)) {
    // FIXME: This does not handle register-indirect values at offset 0. The
    // direct/indirect thing shouldn't really be handled by something as
    // implicit as reg+noreg vs reg+imm in the first place, but it seems
    // pretty baked in right now.
    MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
  }
}
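
// Summary of the cases above (illustrative): no location or a variadic list
// yields an undef (indirect, register 0) DBG_VALUE; constants become
// constant DBG_VALUEs; a dereferenced static alloca is pinned to its frame
// index; everything else becomes one direct DBG_VALUE per vreg of the value.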

void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
                                             const DILocalVariable *Variable,
                                             const DIExpression *Expression,
                                             const DebugLoc &DL,
                                             MachineIRBuilder &MIRBuilder) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
    return;
  }

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  auto AI = dyn_cast<AllocaInst>(Address);
  if (AI && AI->isStaticAlloca()) {
    // Static allocas are tracked at the MF level, no need for DBG_VALUE
    // instructions (in fact, they get ignored if they *do* exist).
    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);
    return;
  }

  if (translateIfEntryValueArgument(true, Address, Variable,
                                    Expression, DL, MIRBuilder))
    return;

  // A dbg.declare describes the address of a source variable, so lower it
  // into an indirect DBG_VALUE.
  MIRBuilder.setDebugLoc(DL);
  MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                   Variable, Expression);
}
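
// E.g. `#dbg_declare(ptr %x.addr, ...)` on a static alloca is recorded once
// in the MachineFunction's variable-debug-info table; for any other address
// it degrades to an indirect DBG_VALUE of the address vreg.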

void IRTranslator::translateDbgInfo(const Instruction &Inst,
                                    MachineIRBuilder &MIRBuilder) {
  for (DbgRecord &DR : Inst.getDbgRecordRange()) {
    if (DbgLabelRecord *DLR = dyn_cast<DbgLabelRecord>(&DR)) {
      MIRBuilder.setDebugLoc(DLR->getDebugLoc());
      assert(DLR->getLabel() && "Missing label");
      assert(DLR->getLabel()->isValidLocationForIntrinsic(
                 MIRBuilder.getDebugLoc()) &&
             "Expected inlined-at fields to agree");
      MIRBuilder.buildDbgLabel(DLR->getLabel());
      continue;
    }
    DbgVariableRecord &DVR = cast<DbgVariableRecord>(DR);
    const DILocalVariable *Variable = DVR.getVariable();
    const DIExpression *Expression = DVR.getExpression();
    Value *V = DVR.getVariableLocationOp(0);
    if (DVR.isDbgDeclare())
      translateDbgDeclareRecord(V, DVR.hasArgList(), Variable, Expression,
                                DVR.getDebugLoc(), MIRBuilder);
    else
      translateDbgValueRecord(V, DVR.hasArgList(), Variable, Expression,
                              DVR.getDebugLoc(), MIRBuilder);
  }
}

bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
  CurBuilder->setMMRAMetadata(Inst.getMetadata(LLVMContext::MD_mmra));

  if (TLI->fallBackToDAGISel(Inst))
    return false;

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
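
// The HANDLE_INST expansion above generates one case per IR opcode, e.g.
//   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());
// where translateAdd is itself a macro-generated forwarder to
// translateBinaryOp(TargetOpcode::G_ADD, ...).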

bool IRTranslator::translate(const Constant &C, Register Reg) {
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour remove debug line.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CPA = dyn_cast<ConstantPtrAuth>(&C)) {
    Register Addr = getOrCreateVReg(*CPA->getPointer());
    Register AddrDisc = getOrCreateVReg(*CPA->getAddrDiscriminator());
    EntryBuilder->buildConstantPtrAuth(Reg, CPA, Addr, AddrDisc);
  } else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    Constant &Elt = *CAZ->getElementValue(0u);
    if (isa<ScalableVectorType>(CAZ->getType())) {
      EntryBuilder->buildSplatVector(Reg, getOrCreateVReg(Elt));
      return true;
    }
    // Return the scalar if it is a <1 x Ty> vector.
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    if (NumElts == 1)
      return translateCopy(C, Elt, *EntryBuilder);
    // All elements are zero so we can just use the first one.
    EntryBuilder->buildSplatBuildVector(Reg, getOrCreateVReg(Elt));
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
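
// All of these are emitted into the entry block, e.g. an `i32 42` operand
// becomes a single `%c:_(s32) = G_CONSTANT i32 42` there, so one definition
// can be shared across every use in the function.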

bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code into
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no need
      // to perform the last bit test, as it will always be true. Instead, make
      // the second-to-last bit-test fall through to the target of the last bit
      // test, and delete the last bit test.

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is "default" BB. We have two jumps to it. From "header" BB and from
    // last "case" BB, unless the latter was skipped.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();

  // Check if we need to generate stack-protector guard checks.
  StackProtector &SP = getAnalysis<StackProtector>();
  if (SP.shouldEmitSDCheck(BB)) {
    bool FunctionBasedInstrumentation =
        TLI->getSSPStackGuardCheck(*MF->getFunction().getParent());
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
    return false;
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Find the split point to split the parent mbb. At the same time copy all
    // physical registers used in the tail of parent mbb into virtual registers
    // before the split point and back into physical registers after the split
    // point. This prevents us needing to deal with Live-ins and many other
    // register allocation issues caused by us splitting the parent mbb. The
    // register allocator will clean up said virtual copies later on.
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());

    // Splice the terminator of ParentMBB into SuccessMBB.
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add compare/jump on neq/jump to the parent BB.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // CodeGen Failure MBB if we have not codegened it yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the Per-BB State.
    SPDescriptor.resetPerBBState();
  }
  return true;
}
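
// The parent-block stack-protector check emitted below compares the guard
// slot against the live guard value, roughly (illustrative MIR, assuming a
// 64-bit pointer target):
//   %slot:_(s64) = G_LOAD %fi(p0) :: (volatile load ...)
//   %cmp:_(s1) = G_ICMP intpred(ne), %guard(s64), %slot
//   G_BRCOND %cmp(s1), %bb.failure
//   G_BR %bb.success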

bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
  // First create the loads to the guard/stack slot for the comparison.
  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  LLT PtrMemTy = getLLTForMVT(TLI->getPointerMemTy(*DL));

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  Register Guard;
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));

  // Generate code to load the content of the guard slot.
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr,
                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
          .getReg(0);

  if (TLI->useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    return false;
  }

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI->getSSPStackGuardCheck(M)) {
    // This path is currently untestable on GlobalISel, since the only platform
    // that needs this seems to be Windows, and we fall back on that currently.
    // The code still lives here in case that changes.
    // Silence warning about unused variable until the code below that uses
    // 'GuardCheckFn' is enabled.
    (void)GuardCheckFn;
    return false;
#if 0
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
    Info.OrigRet = {Register(), FnTy->getReturnType()};
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    return true;
#endif
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  if (TLI->useLoadStackGuardNode(*ParentBB->getBasicBlock()->getModule())) {
    Guard =
        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
    getStackGuard(Guard, *CurBuilder);
  } else {
    // TODO: test using android subtarget when we support @llvm.thread.pointer.
    const Value *IRGuard = TLI->getSDagStackGuard(M);
    Register GuardPtr = getOrCreateVReg(*IRGuard);

    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr,
                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
                            MachineMemOperand::MOLoad |
                                MachineMemOperand::MOVolatile)
                .getReg(0);
  }

  // Perform the comparison.
  auto Cmp =
      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
  // If the guard/stackslot do not equal, branch to failure MBB.
  CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
  // Otherwise branch to success MBB.
  CurBuilder->buildBr(*SPD.getSuccessMBB());
  return true;
}
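
// The failure block below reduces to a call to the stack-protector-fail
// libcall (commonly __stack_chk_fail), followed by G_TRAP when
// TrapUnreachable is set and NoTrapAfterNoreturn is not.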

bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());

  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
  const char *Name = TLI->getLibcallName(Libcall);

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = TLI->getLibcallCallingConv(Libcall);
  Info.Callee = MachineOperand::CreateES(Name);
  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                  0};
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // Emit a trap instruction if we are required to do so.
  const TargetOptions &TargetOpts = TLI->getTargetMachine().Options;
  if (TargetOpts.TrapUnreachable && !TargetOpts.NoTrapAfterNoreturn)
    CurBuilder->buildInstr(TargetOpcode::G_TRAP);

  return true;
}

void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
  SPDescriptor.resetPerFunctionState();
}

/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the end
  // of a block.
  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}

bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();
  TLI = MF->getSubtarget().getTargetLowering();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts) {
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  } else {
    AA = nullptr;
    FuncInfo.BPI = nullptr;
  }

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      MF->getFunction());
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(*TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian()
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Setup a separate basic-block for the arguments and constants
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  FuncInfo.MBBMap.resize(F.getMaxBlockNumber());
  for (const BasicBlock &BB : F) {
    auto *&MBB = FuncInfo.MBBMap[BB.getNumber()];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: "
      << ore::NV("Prototype", F.getFunctionType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIMFObsDelInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG

        // Translate any debug-info attached to the instruction.
        translateDbgInfo(Inst, *CurBuilder);

        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStrStorage << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instruction from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());