//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// This file implements the IRTranslator class.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}
IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}
#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc()) ||
            (MI.isDebugInstr())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
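
// Illustrative note: a value of IR type {i64, i32} is split by
// computeValueLLTs into two LLTs (s64, s32) with bit offsets {0, 64}, so it
// is tracked as a list of two virtual registers, while a scalar value maps
// to a single vreg.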
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}
Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge,
                                     MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}
bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}
bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate()
         : static_cast<CmpInst::Predicate>(
               cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    uint32_t Flags = 0;
    if (CI)
      Flags = MachineInstr::copyFlagsFromInstruction(*CI);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
  }

  return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()).isZero())
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}
void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the case block.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}
static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over a NOT that is not part of the tree and remember to invert the
  // op and operands at the next level.
  const Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}
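
// Worked example for the Or case above, assuming A = TProb = 3/4 and
// B = FProb = 1/4: BB1 branches to TBB with A/2 = 3/8 and to TmpBB with
// A/2 + B = 5/8; TmpBB's normalized probabilities are A/(1+B) = 3/5 and
// 2B/(1+B) = 2/5, so the overall probability of reaching TBB is
// 3/8 + 5/8 * 3/5 = 3/4 = A, as required.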
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOptLevel::None ||
        !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    const Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}
void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}
BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, std::nullopt, DefaultMBB, nullptr,
                     nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();

    unsigned NumClusters = W.LastCluster - W.FirstCluster + 1;
    // For optimized builds, lower large range as a balanced binary tree.
    if (NumClusters > 3 &&
        MF->getTarget().getOptLevel() != CodeGenOptLevel::None &&
        !DefaultMBB->getParent()->getFunction().hasMinSize()) {
      splitWorkItem(WorkList, W, SI.getCondition(), SwitchMBB, MIB);
      continue;
    }

    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}
void IRTranslator::splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                                 const SwitchCG::SwitchWorkListItem &W,
                                 Value *Cond, MachineBasicBlock *SwitchMBB,
                                 MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  assert(W.FirstCluster->Low->getValue().slt(W.LastCluster->Low->getValue()) &&
         "Clusters not sorted?");
  assert(W.LastCluster - W.FirstCluster + 1 >= 2 && "Too small to split!");

  auto [LastLeft, FirstRight, LeftProb, RightProb] =
      SL->computeSplitWorkItemInfo(W);

  // Use the first element on the right as pivot since we will make less-than
  // comparisons against it.
  CaseClusterIt PivotCluster = FirstRight;
  assert(PivotCluster > W.FirstCluster);
  assert(PivotCluster <= W.LastCluster);

  CaseClusterIt FirstLeft = W.FirstCluster;
  CaseClusterIt LastRight = W.LastCluster;

  const ConstantInt *Pivot = PivotCluster->Low;

  // New blocks will be inserted immediately after the current one.
  MachineFunction::iterator BBI(W.MBB);
  ++BBI;

  // We will branch to the LHS if Value < Pivot. If LHS is a single cluster,
  // we can branch to its destination directly if it's squeezed exactly in
  // between the known lower bound and Pivot - 1.
  MachineBasicBlock *LeftMBB;
  if (FirstLeft == LastLeft && FirstLeft->Kind == CC_Range &&
      FirstLeft->Low == W.GE &&
      (FirstLeft->High->getValue() + 1LL) == Pivot->getValue()) {
    LeftMBB = FirstLeft->MBB;
  } else {
    LeftMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, LeftMBB);
    WorkList.push_back(
        {LeftMBB, FirstLeft, LastLeft, W.GE, Pivot, W.DefaultProb / 2});
  }

  // Similarly, we will branch to the RHS if Value >= Pivot. If RHS is a
  // single cluster, RHS.Low == Pivot, and we can branch to its destination
  // directly if RHS.High equals the current upper bound.
  MachineBasicBlock *RightMBB;
  if (FirstRight == LastRight && FirstRight->Kind == CC_Range && W.LT &&
      (FirstRight->High->getValue() + 1ULL) == W.LT->getValue()) {
    RightMBB = FirstRight->MBB;
  } else {
    RightMBB = FuncInfo.MF->CreateMachineBasicBlock(W.MBB->getBasicBlock());
    FuncInfo.MF->insert(BBI, RightMBB);
    WorkList.push_back(
        {RightMBB, FirstRight, LastRight, Pivot, W.LT, W.DefaultProb / 2});
  }

  // Create the CaseBlock record that will be used to lower the branch.
  CaseBlock CB(ICmpInst::Predicate::ICMP_SLT, false, Cond, Pivot, nullptr,
               LeftMBB, RightMBB, W.MBB, MIB.getDebugLoc(), LeftProb,
               RightProb);

  if (W.MBB == SwitchMBB)
    emitSwitchCase(CB, SwitchMBB, MIB);
  else
    SL->SwitchCases.push_back(CB);
}
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  auto *PtrIRTy = PointerType::getUnqual(SValue.getContext());
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}
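
// Worked example: for cases 10..14, JTH.First = 10 and JTH.Last = 14, so the
// header emits Sub = x - 10 and branches to the default block when Sub u> 4;
// e.g. x = 7 gives Sub = -3, which is huge as an unsigned value, so the one
// unsigned compare also rejects everything below the first case.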
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}
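
// Worked example for the range path above: a cluster covering 5..8 is emitted
// as (MHS - 5) u<= 3; e.g. MHS = 4 wraps around to a huge unsigned value, so
// a single unsigned compare checks both bounds of the signed range.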
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}
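
// Illustrative note: with DefaultProb = 1/4 and the default block reachable
// from the jump table, JumpProb gains 1/8 and FallthroughProb loses 1/8,
// keeping the total outgoing probability of CurMBB unchanged.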
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB,
               Fallthrough, CurMBB, MIB.getDebugLoc(), I->Prob,
               UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}
void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}
void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}
bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (MF->getTarget().getOptLevel() != CodeGenOptLevel::None) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}
static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
bool IRTranslator::translateStore(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(),
        nullptr, SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
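
// Illustrative note: storing a value of type {i64, i32} emits two G_STOREs:
// VMap's offsets for the value are {0, 64} bits, so materializePtrAdd
// addresses the parts at byte offsets 0 and 8 from the base pointer.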
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
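
// Illustrative note: for extractvalue {i32, {i64, i32}} %agg, 1, 1 the
// synthesized index list is [0, 1, 1]; with a typical 64-bit data layout the
// inner i32 lives at byte offset 16, so the function returns 128 (bits).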
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint32_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}
bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // If the source is a ConstantInt then it was probably created by
    // ConstantHoisting and we should leave it alone.
    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
                           MIRBuilder);
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  }

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  if (U.getType()->getScalarType()->isBFloatTy() ||
      U.getOperand(0)->getType()->getScalarType()->isBFloatTy())
    return false;
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  // Normalize Vector GEP - all scalar operands should be converted to the
  // splat vector.
  unsigned VectorWidth = 0;

  // True if we should use a splat vector; using VectorWidth alone is not
  // sufficient.
  bool WantSplatVector = false;
  if (auto *VT = dyn_cast<VectorType>(U.getType())) {
    VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
    // We don't produce 1 x N vectors; those are treated as scalars.
    WantSplatVector = VectorWidth > 1;
  }

  // We might need to splat the base pointer into a vector if the offsets
  // are vectors.
  if (WantSplatVector && !PtrTy.isVector()) {
    BaseReg =
        MIRBuilder
            .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
            .getReg(0);
    PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
    PtrTy = getLLTForType(*PtrIRTy, *DL);
    OffsetIRTy = DL->getIndexType(PtrIRTy);
    OffsetTy = getLLTForType(*OffsetIRTy, *DL);
  }

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = GTI.getSequentialElementStride(*DL);

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (std::optional<int64_t> Val = CI->getValue().trySExtValue()) {
          Offset += ElementSize * *Val;
          continue;
        }
      }

      if (Offset != 0) {
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
                      .getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      LLT IdxTy = MRI->getType(IdxReg);
      if (IdxTy != OffsetTy) {
        if (!IdxTy.isVector() && WantSplatVector) {
          IdxReg = MIRBuilder.buildSplatVector(
              OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
        }

        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(OffsetTy, Offset);

    if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
      Flags |= MachineInstr::MIFlag::NoUWrap;

    MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
                           Flags);
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}
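// Memory intrinsics (memcpy/memmove/memset and the inline memcpy variant)
// become G_MEMCPY and friends. The pointer operands pass through unchanged;
// the size operand is normalized to the narrowest pointer width seen among
// the arguments, and for everything except G_MEMCPY_INLINE a trailing
// immediate carries the IR tail-call bit.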
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {
  const Value *SrcPtr = CI.getArgOperand(1);
  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(SrcPtr))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);

  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();

  ConstantInt *CopySize = nullptr;

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
    CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume later that we cannot tail call
    // any memory intrinsics.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
  MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
  if (IsVol) {
    LoadFlags |= MachineMemOperand::MOVolatile;
    StoreFlags |= MachineMemOperand::MOVolatile;
  }

  AAMDNodes AAInfo = CI.getAAMetadata();
  if (AA && CopySize &&
      AA->pointsToConstantMemory(MemoryLocation(
          SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
    LoadFlags |= MachineMemOperand::MOInvariant;

    // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
    // but the previous usage implied it did. Probably should check
    // isDereferenceableAndAlignedPointer.
    LoadFlags |= MachineMemOperand::MODereferenceable;
  }

  ICall.addMemOperand(
      MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
                               StoreFlags, 1, DstAlign, AAInfo));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));

  return true;
}
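// Loads the stack guard value through the target pseudo LOAD_STACK_GUARD.
// When the target exposes the guard as an IR global, an invariant,
// dereferenceable memory operand is attached so later passes can reason
// about the load.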
void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}
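// Overflow intrinsics define two results: the arithmetic value and an
// overflow flag. As an illustration (register names hypothetical), the IR
//   %r = call { i32, i1 } @llvm.uadd.with.overflow.i32(i32 %a, i32 %b)
// becomes a single two-def instruction:
//   %val:_(s32), %ovf:_(s1) = G_UADDO %a, %b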
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}
bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
  return true;
}
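// Maps intrinsics that translate 1:1 onto a generic opcode with no extra
// operands or flags. Intrinsic::not_intrinsic (0) doubles as the "no mapping"
// sentinel that translateSimpleIntrinsic checks for.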
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::exp10:
    return TargetOpcode::G_FEXP10;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::ldexp:
    return TargetOpcode::G_FLDEXP;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  // FADD/FMUL require checking the FMF, so are handled elsewhere.
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_fminimum:
    return TargetOpcode::G_VECREDUCE_FMINIMUM;
  case Intrinsic::vector_reduce_fmaximum:
    return TargetOpcode::G_VECREDUCE_FMAXIMUM;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  case Intrinsic::llround:
    return TargetOpcode::G_LLROUND;
  case Intrinsic::get_fpenv:
    return TargetOpcode::G_GET_FPENV;
  case Intrinsic::get_fpmode:
    return TargetOpcode::G_GET_FPMODE;
  }
  return Intrinsic::not_intrinsic;
}
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (const auto &Arg : CI.args())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}
// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  case Intrinsic::experimental_constrained_ldexp:
    return TargetOpcode::G_STRICT_FLDEXP;
  default:
    return 0;
  }
}
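// Constrained FP intrinsics become G_STRICT_* opcodes. When the exception
// behavior is "ignore", the NoFPExcept flag is set so later passes may relax
// ordering; otherwise the strict opcode keeps its side-effecting semantics.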
bool IRTranslator::translateConstrainedFPIntrinsic(
    const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
  if (!FPI.isUnaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
  if (FPI.isTernaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}
std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
  auto VRegs = getOrCreateVRegs(Arg);
  if (VRegs.size() != 1)
    return std::nullopt;

  // Arguments are lowered as a copy of a livein physical register.
  auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
  if (!VRegDef || !VRegDef->isCopy())
    return std::nullopt;
  return VRegDef->getOperand(1).getReg().asMCReg();
}
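// Debug intrinsics whose expression is an entry_value can only be lowered if
// the operand is a function argument still living in its ABI-assigned
// physical register; otherwise the location is dropped with a debug note.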
bool IRTranslator::translateIfEntryValueArgument(bool isDeclare, Value *Val,
                                                 const DILocalVariable *Var,
                                                 const DIExpression *Expr,
                                                 const DebugLoc &DL,
                                                 MachineIRBuilder &MIRBuilder) {
  auto *Arg = dyn_cast<Argument>(Val);
  if (!Arg)
    return false;

  if (!Expr->isEntryValue())
    return false;

  std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
  if (!PhysReg) {
    LLVM_DEBUG(dbgs() << "Dropping dbg." << (isDeclare ? "declare" : "value")
                      << ": expression is entry_value but "
                      << "couldn't find a physical register\n");
    LLVM_DEBUG(dbgs() << *Var << "\n");
    return true;
  }

  if (isDeclare) {
    // Append an op deref to account for the fact that this is a dbg_declare.
    Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
    MF->setVariableDbgInfo(Var, Expr, *PhysReg, DL);
  } else {
    MIRBuilder.buildDirectDbgValue(*PhysReg, Var, Expr);
  }

  return true;
}
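// Central dispatch for intrinsics that have a dedicated GMIR lowering.
// Anything that returns false here falls back to translateCall, which emits
// a generic intrinsic instruction from the call site instead.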
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, one that just needs a def of a
  // vreg and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    getUnderlyingObjects(CI.getArgOperand(1), Allocas);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");
    translateDbgDeclareRecord(DI.getAddress(), DI.hasArgList(),
                              DI.getVariable(), DI.getExpression(),
                              DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
    Align Alignment = getKnownAlignment(Ptr, *DL);

    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Alignment));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    translateDbgValueRecord(DI.getValue(), DI.hasArgList(), DI.getVariable(),
                            DI.getExpression(), DI.getDebugLoc(), MIRBuilder);
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // TODO: Preserve "int min is poison" arg in GMIR?
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(*MF,
                                       TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPExt(getOrCreateVReg(CI),
                          getOrCreateVReg(*CI.getArgOperand(0)),
                          MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::frexp: {
    ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
    MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
                           getOrCreateVReg(*CI.getArgOperand(0)),
                           MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  }
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal;
    if (TLI.useLoadStackGuardNode()) {
      GuardVal = MRI->createGenericVirtualRegister(PtrTy);
      getStackGuard(GuardVal, MIRBuilder);
    } else
      GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
    return true;
  }
  case Intrinsic::stackrestore: {
    MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
        .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
                                                                Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
  case Intrinsic::trap:
  case Intrinsic::debugtrap:
  case Intrinsic::ubsantrap: {
    StringRef TrapFuncName =
        CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
    if (TrapFuncName.empty())
      break; // Use the default handling.
    CallLowering::CallLoweringInfo Info;
    if (ID == Intrinsic::ubsantrap) {
      Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
                               CI.getArgOperand(0)->getType(), 0});
    }
    Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
    Info.CB = &CI;
    Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
    return CLI->lowerCall(MIRBuilder, Info);
  }
  case Intrinsic::amdgcn_cs_chain:
    return translateCallBase(CI, MIRBuilder);
  case Intrinsic::fptrunc_round: {
    uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);

    // Convert the metadata argument to a constant integer.
    Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
    std::optional<RoundingMode> RoundMode =
        convertStrToRoundingMode(cast<MDString>(MD)->getString());

    // Add the rounding mode as an integer.
    MIRBuilder
        .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
                    {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
        .addImm((int)*RoundMode);

    return true;
  }
  case Intrinsic::is_fpclass: {
    Value *FpValue = CI.getOperand(0);
    ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));

    MIRBuilder
        .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
                    {getOrCreateVReg(*FpValue)})
        .addImm(TestMaskValue->getZExtValue());

    return true;
  }
  case Intrinsic::set_fpenv: {
    Value *FPEnv = CI.getOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_SET_FPENV, {},
                          {getOrCreateVReg(*FPEnv)});
    return true;
  }
  case Intrinsic::reset_fpenv: {
    MIRBuilder.buildInstr(TargetOpcode::G_RESET_FPENV, {}, {});
    return true;
  }
  case Intrinsic::set_fpmode: {
    Value *FPState = CI.getOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_SET_FPMODE, {},
                          {getOrCreateVReg(*FPState)});
    return true;
  }
  case Intrinsic::reset_fpmode: {
    MIRBuilder.buildInstr(TargetOpcode::G_RESET_FPMODE, {}, {});
    return true;
  }
  case Intrinsic::prefetch: {
    Value *Addr = CI.getOperand(0);
    unsigned RW = cast<ConstantInt>(CI.getOperand(1))->getZExtValue();
    unsigned Locality = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
    unsigned CacheType = cast<ConstantInt>(CI.getOperand(3))->getZExtValue();

    auto Flags = RW ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
    auto &MMO = *MF->getMachineMemOperand(MachinePointerInfo(Addr), Flags,
                                          LLT(), Align());

    MIRBuilder.buildPrefetch(getOrCreateVReg(*Addr), RW, Locality, CacheType,
                             MMO);

    return true;
  }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {

  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}
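// Common lowering for direct and indirect calls. Swift error arguments are
// routed through dedicated vregs tracked by SwiftError; everything else is
// forwarded to the target's CallLowering, which may turn the call into a
// tail call.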
bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (const auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(ArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
        R.visit(CI);
      }
    }
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success =
      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support statepoints and related.
  if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  diagnoseDontCall(CI);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (const auto &Arg : enumerate(CI.args())) {
    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MD = MDVal->getMetadata();
      auto *MDN = dyn_cast<MDNode>(MD);
      if (!MDN) {
        if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
          MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
        else // This was probably an MDString.
          return false;
      }
      MIB.addMetadata(MDN);
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    Align Alignment = Info.align.value_or(
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    ? getLLTForMVT(Info.memVT.getSimpleVT())
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());

    // TODO: We currently just fallback to address space 0 if getTgtMemIntrinsic
    // didn't yield anything useful.
    MachinePointerInfo MPI;
    if (Info.ptrVal)
      MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
    else if (Info.fallbackAddressSpace)
      MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
    MIB.addMemOperand(MF->getMachineMemOperand(MPI, Info.flags, MemTy,
                                               Alignment, CI.getAAMetadata()));
  }

  return true;
}
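// Walks the EH pad chain starting at EHPadBB and collects the machine basic
// blocks an invoke may unwind to, scaling the branch probability along the
// way. Landing pads and cleanup pads terminate the walk; catchswitch handlers
// are all added and the walk continues at the catchswitch's unwind successor.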
bool IRTranslator::findUnwindDestinations(
    const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality = classifyEHPersonality(
      EHPadBB->getParent()->getFunction().getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    // Ignore this for now.
    return false;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      break;
    }
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    }
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    } else {
      continue;
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Function *Fn = I.getCalledFunction();

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support control flow guard targets.
  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
    return false;

  // FIXME: support Windows dllimport function calls and calls through
  // weak symbols.
  if (Fn && (Fn->hasDLLImportStorageClass() ||
             (MF->getTarget().getTargetTriple().isOSWindows() &&
              Fn->hasExternalWeakLinkage())))
    return false;

  bool LowerInlineAsm = I.isInlineAsm();
  bool NeedEHLabel = true;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = nullptr;
  if (NeedEHLabel) {
    MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
    BeginSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
  }

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
      return false;
  } else if (!translateCallBase(I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = nullptr;
  if (NeedEHLabel) {
    EndSymbol = Context.createTempSymbol();
    MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
  }

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
    return false;

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  // Update successor info.
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  if (NeedEHLabel) {
    assert(BeginSymbol && "Expected a begin symbol!");
    assert(EndSymbol && "Expected an end symbol!");
    MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  }

  MIRBuilder.buildBr(ReturnMBB);
  return true;
}
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
      .addSym(MF->addLandingPad(&MBB));

  // If the unwinder does not preserve all registers, ensure that the
  // function marks the clobbered registers as used.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  Register Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
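// Static allocas become frame indices; dynamic allocas expand into explicit
// size arithmetic below: size = nelts * sizeof(elt), rounded up to the stack
// alignment with an add/and pair, feeding G_DYN_STACKALLOC.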
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Register NumElts = getOrCreateVReg(*AI.getArraySize());
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Type *Ty = AI.getAllocatedType();

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}
bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
  if (!MF->getTarget().Options.TrapUnreachable)
    return true;

  auto &UI = cast<UnreachableInst>(U);
  // We may be able to ignore unreachable behind a noreturn call.
  if (MF->getTarget().Options.NoTrapAfterNoreturn) {
    const BasicBlock &BB = *UI.getParent();
    if (&UI != &BB.front()) {
      BasicBlock::const_iterator PredI =
          std::prev(BasicBlock::const_iterator(UI));
      if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
        if (Call->doesNotReturn())
          return true;
      }
    }
  }

  MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>());
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}
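// PHIs are created empty here and recorded in PendingPHIs: the incoming
// (value, predecessor) operands can only be added once every block has been
// translated, which finishPendingPhis does at the end of the function.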
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
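// cmpxchg lowers to G_ATOMIC_CMPXCHG_WITH_SUCCESS, which defines both the
// loaded value and an s1 success flag, mirroring the IR instruction's
// { value, i1 } aggregate result.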
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    llvm_unreachable("Unknown atomicrmw op");
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  case AtomicRMWInst::FMax:
    Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
    break;
  case AtomicRMWInst::FMin:
    Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
    break;
  case AtomicRMWInst::UIncWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
    break;
  case AtomicRMWInst::UDecWrap:
    Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                I.getAAMetadata(), nullptr, I.getSyncScopeID(),
                                I.getOrdering()));
  return true;
}
bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}
bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }

  return true;
}
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    if (PI->getType()->isEmptyTy())
      continue;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}
void IRTranslator::translateDbgValueRecord(Value *V, bool HasArgList,
                                           const DILocalVariable *Variable,
                                           const DIExpression *Expression,
                                           const DebugLoc &DL,
                                           MachineIRBuilder &MIRBuilder) {
  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  // Act as if we're handling a debug intrinsic.
  MIRBuilder.setDebugLoc(DL);

  if (!V || HasArgList) {
    // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
    // terminate any prior location.
    MIRBuilder.buildIndirectDbgValue(0, Variable, Expression);
    return;
  }

  if (const auto *CI = dyn_cast<Constant>(V)) {
    MIRBuilder.buildConstDbgValue(*CI, Variable, Expression);
    return;
  }

  if (auto *AI = dyn_cast<AllocaInst>(V);
      AI && AI->isStaticAlloca() && Expression->startsWithDeref()) {
    // If the value is an alloca and the expression starts with a
    // dereference, track a stack slot instead of a register, as registers
    // may be clobbered.
    auto ExprOperands = Expression->getElements();
    auto *ExprDerefRemoved =
        DIExpression::get(AI->getContext(), ExprOperands.drop_front());
    MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), Variable,
                               ExprDerefRemoved);
    return;
  }
  if (translateIfEntryValueArgument(false, V, Variable, Expression, DL,
                                    MIRBuilder))
    return;
  for (Register Reg : getOrCreateVRegs(*V)) {
    // FIXME: This does not handle register-indirect values at offset 0. The
    // direct/indirect thing shouldn't really be handled by something as
    // implicit as reg+noreg vs reg+imm in the first place, but it seems
    // pretty baked in right now.
    MIRBuilder.buildDirectDbgValue(Reg, Variable, Expression);
  }
}
void IRTranslator::translateDbgDeclareRecord(Value *Address, bool HasArgList,
                                             const DILocalVariable *Variable,
                                             const DIExpression *Expression,
                                             const DebugLoc &DL,
                                             MachineIRBuilder &MIRBuilder) {
  if (!Address || isa<UndefValue>(Address)) {
    LLVM_DEBUG(dbgs() << "Dropping debug info for " << *Variable << "\n");
    return;
  }

  assert(Variable->isValidLocationForIntrinsic(DL) &&
         "Expected inlined-at fields to agree");
  auto AI = dyn_cast<AllocaInst>(Address);
  if (AI && AI->isStaticAlloca()) {
    // Static allocas are tracked at the MF level, no need for DBG_VALUE
    // instructions (in fact, they get ignored if they *do* exist).
    MF->setVariableDbgInfo(Variable, Expression,
                           getOrCreateFrameIndex(*AI), DL);
    return;
  }

  if (translateIfEntryValueArgument(true, Address, Variable, Expression, DL,
                                    MIRBuilder))
    return;

  // A dbg.declare describes the address of a source variable, so lower it
  // into an indirect DBG_VALUE.
  MIRBuilder.setDebugLoc(DL);
  MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                   Variable, Expression);
}
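// E.g. a dbg.declare of a non-static alloca ends up as an indirect DBG_VALUE
// of the vreg holding the address, telling the debugger to dereference that
// register to reach the variable's storage.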
void IRTranslator::translateDbgInfo(const Instruction &Inst,
                                    MachineIRBuilder &MIRBuilder) {
  for (DPValue &DPV : Inst.getDbgValueRange()) {
    const DILocalVariable *Variable = DPV.getVariable();
    const DIExpression *Expression = DPV.getExpression();
    Value *V = DPV.getVariableLocationOp(0);
    if (DPV.isDbgDeclare())
      translateDbgDeclareRecord(V, DPV.hasArgList(), Variable, Expression,
                                DPV.getDebugLoc(), MIRBuilder);
    else
      translateDbgValueRecord(V, DPV.hasArgList(), Variable, Expression,
                              DPV.getDebugLoc(), MIRBuilder);
  }
}
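// DPValue records are the non-intrinsic form of dbg.value/dbg.declare;
// walking getDbgValueRange() here emits the records attached to Inst at the
// point they occur, keeping debug info in source order.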
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  if (TLI.fallBackToDAGISel(Inst))
    return false;

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
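// For reference, each HANDLE_INST expansion above yields one case, e.g.
// (after preprocessing):
//   case Instruction::Add: return translateAdd(Inst, *CurBuilder.get());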
bool IRTranslator::translate(const Constant &C, Register Reg) {
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour remove the debug line.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DebugLoc());

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    if (NumElts == 1)
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
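// Worked example: translating `<2 x i32> zeroinitializer` takes the
// ConstantAggregateZero path above, producing roughly (vreg names invented):
//   %z:_(s32) = G_CONSTANT i32 0
//   %v:_(<2 x s32>) = G_BUILD_VECTOR %z(s32), %z(s32)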
bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
                                      MachineBasicBlock &MBB) {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code into
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no need
      // to perform the last bit test, as it will always be true. Instead, make
      // the second-to-last bit-test fall through to the target of the last bit
      // test, and delete the last bit test.

      MachineBasicBlock *NextMBB;
      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is "default" BB. We have two jumps to it. From "header" BB and from
    // last "case" BB, unless the latter was skipped.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();

  // Check if we need to generate stack-protector guard checks.
  StackProtector &SP = getAnalysis<StackProtector>();
  if (SP.shouldEmitSDCheck(BB)) {
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    bool FunctionBasedInstrumentation =
        TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
    SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
  }
  // Handle stack protector.
  if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
    LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
    return false;
  } else if (SPDescriptor.shouldEmitStackProtector()) {
    MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
    MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();

    // Find the split point to split the parent mbb. At the same time copy all
    // physical registers used in the tail of parent mbb into virtual registers
    // before the split point and back into physical registers after the split
    // point. This prevents us needing to deal with Live-ins and many other
    // register allocation issues caused by us splitting the parent mbb. The
    // register allocator will clean up said virtual copies later on.
    MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
        ParentMBB, *MF->getSubtarget().getInstrInfo());

    // Splice the terminator of ParentMBB into SuccessMBB.
    SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
                       ParentMBB->end());

    // Add compare/jump on neq/jump to the parent BB.
    if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
      return false;

    // CodeGen Failure MBB if we have not codegened it yet.
    MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
    if (FailureMBB->empty()) {
      if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
        return false;
    }

    // Clear the Per-BB State.
    SPDescriptor.resetPerBBState();
  }
  return true;
}
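// Example of the contiguous-range special case handled above: for cases
// {0,1,2,3}, the range check in the bit-test header already proves the value
// is in range, so the final bit test would always be true; it is deleted and
// the preceding test falls through to its target instead.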
bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
                                          MachineBasicBlock *ParentBB) {
  CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
  // First create the loads to the guard/stack slot for the comparison.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  Type *PtrIRTy = PointerType::getUnqual(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));

  MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
  int FI = MFI.getStackProtectorIndex();

  Register Guard;
  Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
  const Module &M = *ParentBB->getParent()->getFunction().getParent();
  Align Align = DL->getPrefTypeAlign(PointerType::getUnqual(M.getContext()));

  // Generate code to load the content of the guard slot.
  Register GuardVal =
      CurBuilder
          ->buildLoad(PtrMemTy, StackSlotPtr,
                      MachinePointerInfo::getFixedStack(*MF, FI), Align,
                      MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
          .getReg(0);

  if (TLI.useStackGuardXorFP()) {
    LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
    return false;
  }

  // Retrieve guard check function, nullptr if instrumentation is inlined.
  if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
    // This path is currently untestable on GlobalISel, since the only platform
    // that needs this seems to be Windows, and we fall back on that currently.
    // The code still lives here in case that changes.
    // Silence warning about unused variable until the code below that uses
    // 'GuardCheckFn' is enabled.
    (void)GuardCheckFn;
    return false;
#if 0
    // The target provides a guard check function to validate the guard value.
    // Generate a call to that function with the content of the guard slot as
    // argument.
    FunctionType *FnTy = GuardCheckFn->getFunctionType();
    assert(FnTy->getNumParams() == 1 && "Invalid function signature");
    ISD::ArgFlagsTy Flags;
    if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
      Flags.setInReg();
    CallLowering::ArgInfo GuardArgInfo(
        {GuardVal, FnTy->getParamType(0), {Flags}});

    CallLowering::CallLoweringInfo Info;
    Info.OrigArgs.push_back(GuardArgInfo);
    Info.CallConv = GuardCheckFn->getCallingConv();
    Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
    Info.OrigRet = {Register(), FnTy->getReturnType()};
    if (!CLI->lowerCall(MIRBuilder, Info)) {
      LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
      return false;
    }
    return true;
#endif
  }

  // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
  // Otherwise, emit a volatile load to retrieve the stack guard value.
  if (TLI.useLoadStackGuardNode()) {
    Guard =
        MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
    getStackGuard(Guard, *CurBuilder);
  } else {
    // TODO: test using android subtarget when we support @llvm.thread.pointer.
    const Value *IRGuard = TLI.getSDagStackGuard(M);
    Register GuardPtr = getOrCreateVReg(*IRGuard);

    Guard = CurBuilder
                ->buildLoad(PtrMemTy, GuardPtr,
                            MachinePointerInfo::getFixedStack(*MF, FI), Align,
                            MachineMemOperand::MOLoad |
                                MachineMemOperand::MOVolatile)
                .getReg(0);
  }

  // Perform the comparison.
  auto Cmp =
      CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
  // If the guard/stackslot do not equal, branch to failure MBB.
  CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
  // Otherwise branch to success MBB.
  CurBuilder->buildBr(*SPD.getSuccessMBB());
  return true;
}
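// Sketch of what the parent block now ends with (illustrative MIR only):
//   %slot = G_FRAME_INDEX %stack.protector
//   %val  = G_LOAD %slot               ; volatile
//   %grd  = LOAD_STACK_GUARD           ; or a volatile G_LOAD of the guard
//   %cmp  = G_ICMP intpred(ne), %grd, %val
//   G_BRCOND %cmp, %bb.failure
//   G_BR %bb.success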
bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
                                           MachineBasicBlock *FailureBB) {
  CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();

  const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
  const char *Name = TLI.getLibcallName(Libcall);

  CallLowering::CallLoweringInfo Info;
  Info.CallConv = TLI.getLibcallCallingConv(Libcall);
  Info.Callee = MachineOperand::CreateES(Name);
  Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
                  0};
  if (!CLI->lowerCall(*CurBuilder, Info)) {
    LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
    return false;
  }

  // On PS4/PS5, the "return address" must still be within the calling
  // function, even if it's at the very end, so emit an explicit TRAP here.
  // WebAssembly needs an unreachable instruction after a non-returning call,
  // because the function return type can be different from __stack_chk_fail's
  // return type (void).
  const TargetMachine &TM = MF->getTarget();
  if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
    LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
    return false;
  }
  return true;
}
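// Note: RTLIB::STACKPROTECTOR_CHECK_FAIL usually resolves to the no-return
// __stack_chk_fail libcall; the PS/Wasm trap emission above is still
// unimplemented here, so those targets report failure and fall back.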
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
  SPDescriptor.resetPerFunctionState();
}
/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}
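// E.g. this returns true for a variadic caller whose block contains a
// `musttail call` forwarding the caller's varargs; such calls need special
// frame handling (see setHasMustTailInVarArgFunc below).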
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts) {
    AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  } else {
    AA = nullptr;
    FuncInfo.BPI = nullptr;
  }

  AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
      MF->getFunction());
  LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);
  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian()
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
  // Setup a separate basic-block for the arguments and constants
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }
  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG

        // Translate any debug-info attached to the instruction.
        translateDbgInfo(Inst, *CurBuilder.get());

        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      if (!finalizeBasicBlock(*BB, MBB)) {
        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   BB->getTerminator()->getDebugLoc(), BB);
        R << "unable to translate basic block";
        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }
  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instruction from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->deleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}