//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the IRTranslator class.
///
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;
INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}
IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG
void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  if (OptLevel != CodeGenOpt::None)
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
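  // computeValueLLTs splits aggregates into their leaf types: e.g. a
  // {i64, i32} struct typically yields SplitTys = {s64, s32} with bit
  // offsets {0, 64} (illustrative; the exact offsets depend on the target's
  // DataLayout).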
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}
Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}
MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}
void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}
bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}
bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}
bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}
bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    assert(CI && "Instruction should be CmpInst");
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1,
                         MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}
bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}
void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}
static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}
void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over not part of the tree and remember to invert op and operands at
  // next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // matches as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.
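    // For illustration: with A = 1/2 and B = 1/2 this choice gives BB1 the
    // probabilities 1/4 and 3/4, and TmpBB (after the normalization below)
    // 1/3 and 2/3; indeed 1/4 + 3/4 * 1/3 = 1/2 = A, satisfying the
    // requirement above.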
    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.
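    // For illustration: with A = 1/2 and B = 1/2 this gives BB1 the
    // probabilities 3/4 and 1/4, and TmpBB (after the normalization below)
    // 2/3 and 1/3; indeed 1/4 + 3/4 * 1/3 = 1/2 = B, satisfying the
    // requirement above.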
    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}
void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}
BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);
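  // E.g. adjacent cases 0, 1, 2 and 3 that all branch to the same successor
  // become a single CC_Range cluster [0, 3] after this call.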
  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncating.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
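  // E.g. for cases 10..14 (First = 10, Last = 14) the constant is 4; a
  // switched-on value of 7 gives Sub = 7 - 10, which wraps to a huge unsigned
  // value and is caught by the unsigned > 4 check above.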
  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
        CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
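      // The subtraction above folds the two signed bound checks into a single
      // unsigned compare: e.g. for the range [5, 8], (CmpMHS - 5) u<= 3 holds
      // exactly when 5 <= CmpMHS <= 8, because any value below 5 wraps to a
      // large unsigned number.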
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}
void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  // Ensure that the type will fit the mask value.
  LLT MaskTy = SwitchOpTy;
  for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
    if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
      // Switch table case ranges are encoded into series of masks.
      // Just use pointer type, it's guaranteed to fit.
      MaskTy = LLT::scalar(64);
      break;
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.OmitRangeCheck)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.OmitRangeCheck) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}
&BB
,
1046 MachineBasicBlock
*NextMBB
,
1047 BranchProbability BranchProbToNext
,
1048 Register Reg
, SwitchCG::BitTestCase
&B
,
1049 MachineBasicBlock
*SwitchBB
) {
1050 MachineIRBuilder
&MIB
= *CurBuilder
;
1051 MIB
.setMBB(*SwitchBB
);
1053 LLT SwitchTy
= getLLTForMVT(BB
.RegVT
);
1055 unsigned PopCount
= countPopulation(B
.Mask
);
1056 if (PopCount
== 1) {
1057 // Testing for a single bit; just compare the shift count with what it
1058 // would need to be to shift a 1 bit in that position.
1059 auto MaskTrailingZeros
=
1060 MIB
.buildConstant(SwitchTy
, countTrailingZeros(B
.Mask
));
1062 MIB
.buildICmp(ICmpInst::ICMP_EQ
, LLT::scalar(1), Reg
, MaskTrailingZeros
)
1064 } else if (PopCount
== BB
.Range
) {
1065 // There is only one zero bit in the range, test for it directly.
1066 auto MaskTrailingOnes
=
1067 MIB
.buildConstant(SwitchTy
, countTrailingOnes(B
.Mask
));
1068 Cmp
= MIB
.buildICmp(CmpInst::ICMP_NE
, LLT::scalar(1), Reg
, MaskTrailingOnes
)
1071 // Make desired shift.
1072 auto CstOne
= MIB
.buildConstant(SwitchTy
, 1);
1073 auto SwitchVal
= MIB
.buildShl(SwitchTy
, CstOne
, Reg
);
1075 // Emit bit tests and jumps.
1076 auto CstMask
= MIB
.buildConstant(SwitchTy
, B
.Mask
);
1077 auto AndOp
= MIB
.buildAnd(SwitchTy
, SwitchVal
, CstMask
);
1078 auto CstZero
= MIB
.buildConstant(SwitchTy
, 0);
1079 Cmp
= MIB
.buildICmp(CmpInst::ICMP_NE
, LLT::scalar(1), AndOp
, CstZero
)
1083 // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1084 addSuccessorWithProb(SwitchBB
, B
.TargetBB
, B
.ExtraProb
);
1085 // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1086 addSuccessorWithProb(SwitchBB
, NextMBB
, BranchProbToNext
);
1087 // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1088 // one as they are relative probabilities (and thus work more like weights),
1089 // and hence we need to normalize them to let the sum of them become one.
1090 SwitchBB
->normalizeSuccProbs();
1092 // Record the fact that the IR edge from the header to the bit test target
1093 // will go through our new block. Neeeded for PHIs to have nodes added.
1094 addMachineCFGPred({BB
.Parent
->getBasicBlock(), B
.TargetBB
->getBasicBlock()},
1097 MIB
.buildBrCond(Cmp
, *B
.TargetBB
);
1099 // Avoid emitting unnecessary branches to the next block.
1100 if (NextMBB
!= SwitchBB
->getNextNode())
1101 MIB
.buildBr(*NextMBB
);
bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable) {
    // Skip the range check if the fallthrough block is unreachable.
    BTB->OmitRangeCheck = true;
  }

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (OptLevel != CodeGenOpt::None) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}
bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}
static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);
  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    AAMDNodes AAMetadata;
    LI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    AAMDNodes AAMetadata;
    SI.getAAMetadata(AAMetadata);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAMetadata, nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
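  // E.g. for extractvalue {i32, i64} %agg, 1 the index list becomes (0, 1)
  // and the result is the bit offset of the second member -- 64 on typical
  // targets, since the i64 member is 8-byte aligned.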
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
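  // E.g. if *Src was split into parts at bit offsets {0, 64, 96} and the
  // extracted member starts at bit 64, Idx is 1 and the copy loop below
  // starts from the second source vreg.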
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}
bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}
bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint16_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}
bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL))
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}
bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}
&U
,
1464 MachineIRBuilder
&MIRBuilder
) {
1465 Value
&Op0
= *U
.getOperand(0);
1466 Register BaseReg
= getOrCreateVReg(Op0
);
1467 Type
*PtrIRTy
= Op0
.getType();
1468 LLT PtrTy
= getLLTForType(*PtrIRTy
, *DL
);
1469 Type
*OffsetIRTy
= DL
->getIntPtrType(PtrIRTy
);
1470 LLT OffsetTy
= getLLTForType(*OffsetIRTy
, *DL
);
1472 // Normalize Vector GEP - all scalar operands should be converted to the
1474 unsigned VectorWidth
= 0;
1476 // True if we should use a splat vector; using VectorWidth alone is not
1478 bool WantSplatVector
= false;
1479 if (auto *VT
= dyn_cast
<VectorType
>(U
.getType())) {
1480 VectorWidth
= cast
<FixedVectorType
>(VT
)->getNumElements();
1481 // We don't produce 1 x N vectors; those are treated as scalars.
1482 WantSplatVector
= VectorWidth
> 1;
1485 // We might need to splat the base pointer into a vector if the offsets
1487 if (WantSplatVector
&& !PtrTy
.isVector()) {
1490 .buildSplatVector(LLT::fixed_vector(VectorWidth
, PtrTy
), BaseReg
)
1492 PtrIRTy
= FixedVectorType::get(PtrIRTy
, VectorWidth
);
1493 PtrTy
= getLLTForType(*PtrIRTy
, *DL
);
1494 OffsetIRTy
= DL
->getIntPtrType(PtrIRTy
);
1495 OffsetTy
= getLLTForType(*OffsetIRTy
, *DL
);
1499 for (gep_type_iterator GTI
= gep_type_begin(&U
), E
= gep_type_end(&U
);
1501 const Value
*Idx
= GTI
.getOperand();
1502 if (StructType
*StTy
= GTI
.getStructTypeOrNull()) {
1503 unsigned Field
= cast
<Constant
>(Idx
)->getUniqueInteger().getZExtValue();
1504 Offset
+= DL
->getStructLayout(StTy
)->getElementOffset(Field
);
1507 uint64_t ElementSize
= DL
->getTypeAllocSize(GTI
.getIndexedType());
1509 // If this is a scalar constant or a splat vector of constants,
1510 // handle it quickly.
1511 if (const auto *CI
= dyn_cast
<ConstantInt
>(Idx
)) {
1512 Offset
+= ElementSize
* CI
->getSExtValue();
1517 auto OffsetMIB
= MIRBuilder
.buildConstant({OffsetTy
}, Offset
);
1518 BaseReg
= MIRBuilder
.buildPtrAdd(PtrTy
, BaseReg
, OffsetMIB
.getReg(0))
1523 Register IdxReg
= getOrCreateVReg(*Idx
);
1524 LLT IdxTy
= MRI
->getType(IdxReg
);
1525 if (IdxTy
!= OffsetTy
) {
1526 if (!IdxTy
.isVector() && WantSplatVector
) {
1527 IdxReg
= MIRBuilder
.buildSplatVector(
1528 OffsetTy
.changeElementType(IdxTy
), IdxReg
).getReg(0);
1531 IdxReg
= MIRBuilder
.buildSExtOrTrunc(OffsetTy
, IdxReg
).getReg(0);
1534 // N = N + Idx * ElementSize;
1535 // Avoid doing it for ElementSize of 1.
1536 Register GepOffsetReg
;
1537 if (ElementSize
!= 1) {
1538 auto ElementSizeMIB
= MIRBuilder
.buildConstant(
1539 getLLTForType(*OffsetIRTy
, *DL
), ElementSize
);
1541 MIRBuilder
.buildMul(OffsetTy
, IdxReg
, ElementSizeMIB
).getReg(0);
1543 GepOffsetReg
= IdxReg
;
1545 BaseReg
= MIRBuilder
.buildPtrAdd(PtrTy
, BaseReg
, GepOffsetReg
).getReg(0);
1551 MIRBuilder
.buildConstant(OffsetTy
, Offset
);
1552 MIRBuilder
.buildPtrAdd(getOrCreateVReg(U
), BaseReg
, OffsetMIB
.getReg(0));
1556 MIRBuilder
.buildCopy(getOrCreateVReg(U
), BaseReg
);
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned Opcode) {

  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(CI.getArgOperand(1)))
    return true;

  SmallVector<Register, 3> SrcRegs;

  unsigned MinPtrSize = UINT_MAX;
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
    Register SrcReg = getOrCreateVReg(**AI);
    LLT SrcTy = MRI->getType(SrcReg);
    if (SrcTy.isPointer())
      MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
    SrcRegs.push_back(SrcReg);
  }

  LLT SizeTy = LLT::scalar(MinPtrSize);

  // The size operand should be the minimum of the pointer sizes.
  Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
  if (MRI->getType(SizeOpReg) != SizeTy)
    SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
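  // E.g. for a memcpy between address spaces with 64-bit and 32-bit pointers,
  // the size operand is normalized to the narrower 32-bit width here.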
  auto ICall = MIRBuilder.buildInstr(Opcode);
  for (Register SrcReg : SrcRegs)
    ICall.addUse(SrcReg);

  Align DstAlign;
  Align SrcAlign;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
          ->getZExtValue();

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
  } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
    DstAlign = MCI->getDestAlign().valueOrOne();
    SrcAlign = MCI->getSourceAlign().valueOrOne();
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = MMI->getDestAlign().valueOrOne();
    SrcAlign = MMI->getSourceAlign().valueOrOne();
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = MSI->getDestAlign().valueOrOne();
  }

  if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
    // We need to propagate the tail call flag from the IR inst as an argument.
    // Otherwise, we have to pessimize and assume later that we cannot tail call
    // any memory intrinsics.
    ICall.addImm(CI.isTailCall() ? 1 : 0);
  }

  // Create mem operands to store the alignment and volatile info.
  auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  ICall.addMemOperand(MF->getMachineMemOperand(
      MachinePointerInfo(CI.getArgOperand(0)),
      MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
  if (Opcode != TargetOpcode::G_MEMSET)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(CI.getArgOperand(1)),
        MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));

  return true;
}
void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB =
      MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
  LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef = MF->getMachineMemOperand(
      MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
  MIB.setMemRefs({MemRef});
}
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(
      Op, {ResRegs[0], ResRegs[1]},
      {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});

  return true;
}
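// For example, llvm.uadd.with.overflow.i32 becomes
//   %sum:_(s32), %carry:_(s1) = G_UADDO %a, %b
// where ResRegs[0] holds the arithmetic result and ResRegs[1] the overflow
// bit of the {i32, i1} aggregate return.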
bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
                                                MachineIRBuilder &MIRBuilder) {
  Register Dst = getOrCreateVReg(CI);
  Register Src0 = getOrCreateVReg(*CI.getOperand(0));
  Register Src1 = getOrCreateVReg(*CI.getOperand(1));
  uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
  MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
  return true;
}
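// For example, llvm.smul.fix.i32(%a, %b, 7) becomes
//   %d:_(s32) = G_SMULFIX %a, %b, 7
// with the scale encoded as an immediate operand rather than a register.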
unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::fshl:
    return TargetOpcode::G_FSHL;
  case Intrinsic::fshr:
    return TargetOpcode::G_FSHR;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::powi:
    return TargetOpcode::G_FPOWI;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::roundeven:
    return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  case Intrinsic::readcyclecounter:
    return TargetOpcode::G_READCYCLECOUNTER;
  case Intrinsic::ptrmask:
    return TargetOpcode::G_PTRMASK;
  case Intrinsic::lrint:
    return TargetOpcode::G_INTRINSIC_LRINT;
  // FADD/FMUL require checking the FMF, so are handled elsewhere.
  case Intrinsic::vector_reduce_fmin:
    return TargetOpcode::G_VECREDUCE_FMIN;
  case Intrinsic::vector_reduce_fmax:
    return TargetOpcode::G_VECREDUCE_FMAX;
  case Intrinsic::vector_reduce_add:
    return TargetOpcode::G_VECREDUCE_ADD;
  case Intrinsic::vector_reduce_mul:
    return TargetOpcode::G_VECREDUCE_MUL;
  case Intrinsic::vector_reduce_and:
    return TargetOpcode::G_VECREDUCE_AND;
  case Intrinsic::vector_reduce_or:
    return TargetOpcode::G_VECREDUCE_OR;
  case Intrinsic::vector_reduce_xor:
    return TargetOpcode::G_VECREDUCE_XOR;
  case Intrinsic::vector_reduce_smax:
    return TargetOpcode::G_VECREDUCE_SMAX;
  case Intrinsic::vector_reduce_smin:
    return TargetOpcode::G_VECREDUCE_SMIN;
  case Intrinsic::vector_reduce_umax:
    return TargetOpcode::G_VECREDUCE_UMAX;
  case Intrinsic::vector_reduce_umin:
    return TargetOpcode::G_VECREDUCE_UMIN;
  case Intrinsic::lround:
    return TargetOpcode::G_LROUND;
  }
  return Intrinsic::not_intrinsic;
}
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}
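// For example, llvm.fabs.f32(%x) maps directly to
//   %r:_(s32) = G_FABS %x
// with any fast-math flags on the call copied onto the generic instruction.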
// TODO: Include ConstrainedOps.def when all strict instructions are defined.
static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
  switch (ID) {
  case Intrinsic::experimental_constrained_fadd:
    return TargetOpcode::G_STRICT_FADD;
  case Intrinsic::experimental_constrained_fsub:
    return TargetOpcode::G_STRICT_FSUB;
  case Intrinsic::experimental_constrained_fmul:
    return TargetOpcode::G_STRICT_FMUL;
  case Intrinsic::experimental_constrained_fdiv:
    return TargetOpcode::G_STRICT_FDIV;
  case Intrinsic::experimental_constrained_frem:
    return TargetOpcode::G_STRICT_FREM;
  case Intrinsic::experimental_constrained_fma:
    return TargetOpcode::G_STRICT_FMA;
  case Intrinsic::experimental_constrained_sqrt:
    return TargetOpcode::G_STRICT_FSQRT;
  default:
    return 0;
  }
}
bool IRTranslator::translateConstrainedFPIntrinsic(
  const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
  fp::ExceptionBehavior EB = FPI.getExceptionBehavior().getValue();

  unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
  if (!Opcode)
    return false;

  unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
  if (EB == fp::ExceptionBehavior::ebIgnore)
    Flags |= MachineInstr::NoFPExcept;

  SmallVector<llvm::SrcOp, 4> VRegs;
  VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
  if (!FPI.isUnaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
  if (FPI.isTernaryOp())
    VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));

  MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
  return true;
}
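// For example, llvm.experimental.constrained.fadd with "fpexcept.ignore"
// becomes a G_STRICT_FADD carrying the NoFPExcept flag, telling later
// passes that the operation cannot raise a floating-point exception.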
bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {
  if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
    if (ORE->enabled()) {
      const Function &F = *MI->getParent()->getParent();
      auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      if (MemoryOpRemark::canHandle(MI, TLI)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
        R.visit(MI);
      }
    }
  }

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    getUnderlyingObjects(CI.getArgOperand(1), Allocas);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
        .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
                                                MachineMemOperand::MOStore,
                                                ListSize, Align(1)));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V || DI.hasArgList()) {
      // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
      // terminate any prior location.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0. The
        // direct/indirect thing shouldn't really be handled by something as
        // implicit as reg+noreg vs reg+imm in the first place, but it seems
        // pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::uadd_sat:
    return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
  case Intrinsic::sadd_sat:
    return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
  case Intrinsic::usub_sat:
    return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
  case Intrinsic::ssub_sat:
    return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
  case Intrinsic::ushl_sat:
    return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
  case Intrinsic::sshl_sat:
    return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
  case Intrinsic::umin:
    return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
  case Intrinsic::umax:
    return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
  case Intrinsic::smin:
    return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
  case Intrinsic::smax:
    return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
  case Intrinsic::abs:
    // TODO: Preserve "int min is poison" arg in GMIR?
    return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
  case Intrinsic::smul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
  case Intrinsic::umul_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
  case Intrinsic::smul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI,
                                        MIRBuilder);
  case Intrinsic::umul_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI,
                                        MIRBuilder);
  case Intrinsic::sdiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
  case Intrinsic::udiv_fix:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
  case Intrinsic::sdiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI,
                                        MIRBuilder);
  case Intrinsic::udiv_fix_sat:
    return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI,
                                        MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(*MF,
                                       TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
                          MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildFMul(
          Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildFAdd(Dst, FMul, Op2,
                           MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::convert_from_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPExt(getOrCreateVReg(CI),
                          getOrCreateVReg(*CI.getArgOperand(0)),
                          MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::convert_to_fp16:
    // FIXME: This intrinsic should probably be removed from the IR.
    MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
                            getOrCreateVReg(*CI.getArgOperand(0)),
                            MachineInstr::copyFlagsFromInstruction(CI));
    return true;
  case Intrinsic::memcpy_inline:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
  case Intrinsic::memcpy:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
  case Intrinsic::memmove:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy, Align(8)));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
                          {getOrCreateVReg(*CI.getArgOperand(0))});
    return true;
  }
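  // For example, llvm.ctlz.i32(%x, i1 true), which is undefined for a zero
  // input, maps to G_CTLZ_ZERO_UNDEF above, while the i1-false form maps to
  // the fully defined G_CTLZ.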
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::expect:
  case Intrinsic::annotation:
  case Intrinsic::ptr_annotation:
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group: {
    // Drop the intrinsic, but forward the value.
    MIRBuilder.buildCopy(getOrCreateVReg(CI),
                         getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::assume:
  case Intrinsic::experimental_noalias_scope_decl:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  case Intrinsic::read_volatile_register:
  case Intrinsic::read_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder
        .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
        .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
    return true;
  }
  case Intrinsic::write_register: {
    Value *Arg = CI.getArgOperand(0);
    MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
      .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
      .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
    return true;
  }
  case Intrinsic::localescape: {
    MachineBasicBlock &EntryMBB = MF->front();
    StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());

    // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
    // is the same on all targets.
    for (unsigned Idx = 0, E = CI.getNumArgOperands(); Idx < E; ++Idx) {
      Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
      if (isa<ConstantPointerNull>(Arg))
        continue; // Skip null pointers. They represent a hole in index space.

      int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
      MCSymbol *FrameAllocSym =
          MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
                                                                Idx);

      // This should be inserted at the start of the entry block.
      auto LocalEscape =
          MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
              .addSym(FrameAllocSym)
              .addFrameIndex(FI);

      EntryMBB.insert(EntryMBB.begin(), LocalEscape);
    }

    return true;
  }
  case Intrinsic::vector_reduce_fadd:
  case Intrinsic::vector_reduce_fmul: {
    // Need to check for the reassoc flag to decide whether we want a
    // sequential reduction opcode or not.
    Register Dst = getOrCreateVReg(CI);
    Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
    Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
    unsigned Opc = 0;
    if (!CI.hasAllowReassoc()) {
      // The sequential ordering case.
      Opc = ID == Intrinsic::vector_reduce_fadd
                ? TargetOpcode::G_VECREDUCE_SEQ_FADD
                : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
      MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
                            MachineInstr::copyFlagsFromInstruction(CI));
      return true;
    }
    // We split the operation into a separate G_FADD/G_FMUL + the reduce,
    // since the associativity doesn't matter.
    unsigned ScalarOpc;
    if (ID == Intrinsic::vector_reduce_fadd) {
      Opc = TargetOpcode::G_VECREDUCE_FADD;
      ScalarOpc = TargetOpcode::G_FADD;
    } else {
      Opc = TargetOpcode::G_VECREDUCE_FMUL;
      ScalarOpc = TargetOpcode::G_FMUL;
    }
    LLT DstTy = MRI->getType(Dst);
    auto Rdx = MIRBuilder.buildInstr(
        Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
    MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
                          MachineInstr::copyFlagsFromInstruction(CI));

    return true;
  }
  case Intrinsic::isnan: {
    Register Src = getOrCreateVReg(*CI.getArgOperand(0));
    unsigned Flags = MachineInstr::copyFlagsFromInstruction(CI);
    if (!CI.getFunction()->getAttributes().hasFnAttr(llvm::Attribute::StrictFP))
      Flags |= MachineInstr::NoFPExcept;
    MIRBuilder.buildIsNaN(getOrCreateVReg(CI), Src, Flags);
    return true;
  }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
    return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
                                           MIRBuilder);
  }
  return false;
}
bool IRTranslator::translateInlineAsm(const CallBase &CB,
                                      MachineIRBuilder &MIRBuilder) {

  const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();

  if (!ALI) {
    LLVM_DEBUG(
        dbgs() << "Inline asm lowering is not supported for this target yet\n");
    return false;
  }

  return ALI->lowerInlineAsm(
      MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
}
bool IRTranslator::translateCallBase(const CallBase &CB,
                                     MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> Res = getOrCreateVRegs(CB);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (auto &Arg : CB.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &CB, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(makeArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  if (auto *CI = dyn_cast<CallInst>(&CB)) {
    if (ORE->enabled()) {
      const Function &F = *CI->getParent()->getParent();
      auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
      if (MemoryOpRemark::canHandle(CI, TLI)) {
        MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
        R.visit(CI);
      }
    }
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success =
      CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
                     [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}
bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && (F->hasDLLImportStorageClass() ||
            (MF->getTarget().getTargetTriple().isOSWindows() &&
             F->hasExternalWeakLinkage())))
    return false;

  // FIXME: support control flow guard targets.
  if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallBase(CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (auto &Arg : enumerate(CI.arg_operands())) {
    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else if (auto MD = dyn_cast<MetadataAsValue>(Arg.value())) {
      auto *MDN = dyn_cast<MDNode>(MD->getMetadata());
      if (!MDN) // This was probably an MDString.
        return false;
      MIB.addMetadata(MDN);
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
    Align Alignment = Info.align.getValueOr(
        DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
    LLT MemTy = Info.memVT.isSimple()
                    ? getLLTForMVT(Info.memVT.getSimpleVT())
                    : LLT::scalar(Info.memVT.getStoreSizeInBits());
    MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
                                               Info.flags, MemTy, Alignment));
  }

  return true;
}
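// For example, a target intrinsic whose parameter is marked ImmArg, such as
// a constant lane index, is encoded above as an immediate operand of the
// G_INTRINSIC instruction instead of being materialized into a vreg.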
bool IRTranslator::findUnwindDestinations(
    const BasicBlock *EHPadBB,
    BranchProbability Prob,
    SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
        &UnwindDests) {
  EHPersonality Personality = classifyEHPersonality(
      EHPadBB->getParent()->getFunction().getPersonalityFn());
  bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
  bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
  bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
  bool IsSEH = isAsynchronousEHPersonality(Personality);

  if (IsWasmCXX) {
    // Ignore this for now.
    return false;
  }

  while (EHPadBB) {
    const Instruction *Pad = EHPadBB->getFirstNonPHI();
    BasicBlock *NewEHPadBB = nullptr;
    if (isa<LandingPadInst>(Pad)) {
      // Stop on landingpads. They are not funclets.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      break;
    }
    if (isa<CleanupPadInst>(Pad)) {
      // Stop on cleanup pads. Cleanups are always funclet entries for all known
      // personalities.
      UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
      UnwindDests.back().first->setIsEHScopeEntry();
      UnwindDests.back().first->setIsEHFuncletEntry();
      break;
    }
    if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
      // Add the catchpad handlers to the possible destinations.
      for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
        UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
        // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
        if (IsMSVCCXX || IsCoreCLR)
          UnwindDests.back().first->setIsEHFuncletEntry();
        if (!IsSEH)
          UnwindDests.back().first->setIsEHScopeEntry();
      }
      NewEHPadBB = CatchSwitch->getUnwindDest();
    }

    BranchProbabilityInfo *BPI = FuncInfo.BPI;
    if (BPI && NewEHPadBB)
      Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
    EHPadBB = NewEHPadBB;
  }
  return true;
}
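// For example, when a catchswitch unwinds to another EH pad, the loop above
// keeps walking the unwind chain, scaling Prob by each edge probability,
// until it terminates at a landingpad or cleanup pad.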
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Function *Fn = I.getCalledFunction();

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support whatever these are.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support control flow guard targets.
  if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
    return false;

  bool LowerInlineAsm = false;
  if (I.isInlineAsm()) {
    const InlineAsm *IA = cast<InlineAsm>(I.getCalledOperand());
    if (!IA->canThrow()) {
      // Fast path without emitting EH_LABELs.

      if (!translateInlineAsm(I, MIRBuilder))
        return false;

      MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB(),
                        *ReturnMBB = &getMBB(*ReturnBB);

      // Update successor info.
      addSuccessorWithProb(InvokeMBB, ReturnMBB, BranchProbability::getOne());

      MIRBuilder.buildBr(*ReturnMBB);
      return true;
    } else {
      LowerInlineAsm = true;
    }
  }

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  if (LowerInlineAsm) {
    if (!translateInlineAsm(I, MIRBuilder))
      return false;
  } else if (!translateCallBase(I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
  BranchProbability EHPadBBProb =
      BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
          : BranchProbability::getZero();

  if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
    return false;

  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  // Update successor info.
  addSuccessorWithProb(InvokeMBB, &ReturnMBB);
  for (auto &UnwindDest : UnwindDests) {
    UnwindDest.first->setIsEHPad();
    addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
  }
  InvokeMBB->normalizeSuccProbs();

  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.buildBr(ReturnMBB);
  return true;
}
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If landingpad's return type is token type, we don't create DAG nodes
  // for its exception pointer and selector value. The extraction of exception
  // pointer or selector value from token type landingpads is not currently
  // supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  // If the unwinder does not preserve all registers, ensure that the
  // function marks the clobbered registers as used.
  const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
  if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
    MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  Register Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");

  // Mark exception register as live in.
  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Register NumElts = getOrCreateVReg(*AI.getArraySize());
  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Type *Ty = AI.getAllocatedType();

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
  Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
  if (Alignment <= StackAlign)
    Alignment = Align(1);
  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);

  MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
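// For example, for 'alloca i32, i64 %n' with a 16-byte stack alignment, the
// dynamic path above emits roughly:
//   %size    = G_MUL %n, 4                ; total allocation size
//   %bumped  = G_ADD %size, 15            ; nuw
//   %aligned = G_AND %bumped, -16         ; ~(StackAlign - 1)
//   %ptr     = G_DYN_STACKALLOC %aligned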
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
                        {getOrCreateVReg(*U.getOperand(0)),
                         DL->getABITypeAlign(U.getType()).value()});
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(1), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
    return translateCopy(U, *U.getOperand(0), MIRBuilder);

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
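// For example, on a target whose preferred vector index type is s64, an
// extractelement with an i32 index has the index sign-extended to s64
// before the G_EXTRACT_VECTOR_ELT is built, so constant and dynamic
// indices end up with a uniform width.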
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  ArrayRef<int> Mask;
  if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
    Mask = SVI->getShuffleMask();
  else
    Mask = cast<ConstantExpr>(U).getShuffleMask();
  ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
  MIRBuilder
      .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
                  {getOrCreateVReg(*U.getOperand(0)),
                   getOrCreateVReg(*U.getOperand(1))})
      .addShuffleMask(MaskAlloc);
  return true;
}
bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  AAMDNodes AAMetadata;
  I.getAAMetadata(AAMetadata);

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(
          MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
          getMemOpAlign(I), AAMetadata, nullptr, I.getSyncScopeID(),
          I.getSuccessOrdering(), I.getFailureOrdering()));
  return true;
}
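// For example, 'cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst' becomes
//   %val:_(s32), %ok:_(s1) = G_ATOMIC_CMPXCHG_WITH_SUCCESS %p, %old, %new
// with both orderings and the sync scope recorded on the memoperand.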
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  }

  AAMDNodes AAMetadata;
  I.getAAMetadata(AAMetadata);

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, MRI->getType(Val), getMemOpAlign(I),
                                AAMetadata, nullptr, I.getSyncScopeID(),
                                I.getOrdering()));

  return true;
}
bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}
bool IRTranslator::translateFreeze(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
  const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));

  assert(DstRegs.size() == SrcRegs.size() &&
         "Freeze with different source and destination type?");

  for (unsigned I = 0; I < DstRegs.size(); ++I) {
    MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
  }

  return true;
}
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}
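// Note that each G_PHI operand is appended as a (value register, predecessor
// MBB) pair, mirroring the (value, block) pairs of the IR PHI, and SeenPreds
// guards against adding the same machine predecessor more than once.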
bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  if (TLI.fallBackToDAGISel(Inst))
    return false;

  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, Register Reg) {
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour set the line to 0.
  if (auto CurrInstDL = CurBuilder->getDL())
    EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
                                              CurrInstDL.getScope(),
                                              CurrInstDL.getInlinedAt()));

  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C))
    EntryBuilder->buildConstant(Reg, 0);
  else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!isa<FixedVectorType>(CAZ->getType()))
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    unsigned NumElts = CAZ->getElementCount().getFixedValue();
    if (NumElts == 1)
      return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned I = 0; I < NumElts; ++I) {
      Constant &Elt = *CAZ->getElementValue(I);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translateCopy(C, *CV->getElementAsConstant(0),
                           *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch(CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translateCopy(C, *CV->getOperand(0), *EntryBuilder.get());
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
void IRTranslator::finalizeBasicBlock() {
  for (auto &BTB : SL->BitTestCases) {
    // Emit header first, if it wasn't already emitted.
    if (!BTB.Emitted)
      emitBitTestHeader(BTB, BTB.Parent);

    BranchProbability UnhandledProb = BTB.Prob;
    for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
      UnhandledProb -= BTB.Cases[j].ExtraProb;
      // Set the current basic block to the mbb we wish to insert the code into
      MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
      // If all cases cover a contiguous range, it is not necessary to jump to
      // the default block after the last bit test fails. This is because the
      // range check during bit test header creation has guaranteed that every
      // case here doesn't go outside the range. In this case, there is no need
      // to perform the last bit test, as it will always be true. Instead, make
      // the second-to-last bit-test fall through to the target of the last bit
      // test, and delete the last bit test.

      MachineBasicBlock *NextMBB;
      if (BTB.ContiguousRange && j + 2 == ej) {
        // Second-to-last bit-test with contiguous range: fall through to the
        // target of the final bit test.
        NextMBB = BTB.Cases[j + 1].TargetBB;
      } else if (j + 1 == ej) {
        // For the last bit test, fall through to Default.
        NextMBB = BTB.Default;
      } else {
        // Otherwise, fall through to the next bit test.
        NextMBB = BTB.Cases[j + 1].ThisBB;
      }

      emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);

      if (BTB.ContiguousRange && j + 2 == ej) {
        // We need to record the replacement phi edge here that normally
        // happens in emitBitTestCase before we delete the case, otherwise the
        // phi edge will be lost.
        addMachineCFGPred({BTB.Parent->getBasicBlock(),
                           BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
                          MBB);
        // Since we're not going to use the final bit test, remove it.
        BTB.Cases.pop_back();
        break;
      }
    }
    // This is "default" BB. We have two jumps to it. From "header" BB and from
    // last "case" BB, unless the latter was skipped.
    CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
                                   BTB.Default->getBasicBlock()};
    addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
    if (!BTB.ContiguousRange) {
      addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
    }
  }
  SL->BitTestCases.clear();

  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();

  for (auto &SwCase : SL->SwitchCases)
    emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
  SL->SwitchCases.clear();
}
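// For example, when a switch is lowered to bit tests over a contiguous
// range, the final test is provably true, so the loop above rewires the
// second-to-last test to fall through to the last case's target and drops
// the now-redundant final bit test.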
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext())
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
}
/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  const TargetMachine &TM = MF->getTarget();
  TM.resetTargetOptions(F);
  EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
  FuncInfo.MF = MF;
  if (EnableOpts)
    FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
  else
    FuncInfo.BPI = nullptr;

  FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  assert(PendingPHIs.empty() && "stale PHIs");

  // Targets which want to use big endian can enable it using
  // enableBigEndian()
  if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }

  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Setup a separate basic-block for the arguments and constants
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB: F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }

  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fallthrough to the IR entry block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  if (CLI->fallBackToDAGISel(*MF)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower function: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg: F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()).isZero())
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }

  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs, FuncInfo)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }

  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      finalizeBasicBlock();
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }

  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic block!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}