//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/GlobalISel/CSEInfo.h"
#include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/LowLevelTypeUtils.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RuntimeLibcalls.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetOpcodes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/MemoryOpRemark.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <optional>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(StackProtector)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(Twine(R.getMsg()));
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator(CodeGenOptLevel optlevel)
    : MachineFunctionPass(ID), OptLevel(optlevel) {}

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have no debug loc because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            (MI.getParent()->isEntryBlock() && !MI.getDebugLoc())) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  AU.addRequired<AssumptionCacheTracker>();
  if (OptLevel != CodeGenOptLevel::None) {
    AU.addRequired<BranchProbabilityInfoWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
  }
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<TargetLibraryInfoWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

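// Note: unlike getOrCreateVRegs, this only reserves placeholder (invalid)
// registers for each split type of Val; callers such as translateExtractValue
// and translateInsertValue fill the list in afterwards.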
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  auto MapEntry = FrameIndices.find(&AI);
  if (MapEntry != FrameIndices.end())
    return MapEntry->second;

  uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  uint64_t Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max<uint64_t>(Size, 1u);

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
  return FI;
}

Align IRTranslator::getMemOpAlign(const Instruction &I) {
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
    return SI->getAlign();
  if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
    return LI->getAlign();
  if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
    return AI->getAlign();
  if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
    return AI->getAlign();

  OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
  R << "unable to translate memop: " << ore::NV("Opcode", &I);
  reportTranslationError(*MF, *TPC, *ORE, R);
  return Align(1);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint32_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    uint32_t Flags = 0;
    if (CI)
      Flags = MachineInstr::copyFlagsFromInstruction(*CI);
    MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up the insertion point, but that is not important
  // here, as a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
}

void IRTranslator::emitBranchForMergedCondition(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
  // If the leaf of the tree is a comparison, merge the condition into
  // the caseblock.
  if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
    CmpInst::Predicate Condition;
    if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
      Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
    } else {
      const FCmpInst *FC = cast<FCmpInst>(Cond);
      Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
    }

    SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
                           BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
                           CurBuilder->getDebugLoc(), TProb, FProb);
    SL->SwitchCases.push_back(CB);
    return;
  }

  // Create a CaseBlock record representing this branch.
  CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  SwitchCG::CaseBlock CB(
      Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
      nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
  SL->SwitchCases.push_back(CB);
}

static bool isValInBlock(const Value *V, const BasicBlock *BB) {
  if (const Instruction *I = dyn_cast<Instruction>(V))
    return I->getParent() == BB;
  return true;
}

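// Recursively decompose a chain of and/or conditions into a series of
// conditional branches, pushing one SwitchCG::CaseBlock per leaf comparison
// onto SL->SwitchCases (via emitBranchForMergedCondition).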
void IRTranslator::findMergedConditions(
    const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
    MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
    Instruction::BinaryOps Opc, BranchProbability TProb,
    BranchProbability FProb, bool InvertCond) {
  using namespace PatternMatch;
  assert((Opc == Instruction::And || Opc == Instruction::Or) &&
         "Expected Opc to be AND/OR");
  // Skip over a 'not' that is not part of the tree, and remember to invert
  // the op and operands at the next level.
  Value *NotCond;
  if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
      isValInBlock(NotCond, CurBB->getBasicBlock())) {
    findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
                         !InvertCond);
    return;
  }

  const Instruction *BOp = dyn_cast<Instruction>(Cond);
  const Value *BOpOp0, *BOpOp1;
  // Compute the effective opcode for Cond, taking into account whether it
  // needs to be inverted, e.g.
  //   and (not (or A, B)), C
  // gets lowered as
  //   and (and (not A, not B), C)
  Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
  if (BOp) {
    BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
               ? Instruction::And
               : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
                      ? Instruction::Or
                      : (Instruction::BinaryOps)0);
    if (InvertCond) {
      if (BOpc == Instruction::And)
        BOpc = Instruction::Or;
      else if (BOpc == Instruction::Or)
        BOpc = Instruction::And;
    }
  }

  // If this node is not part of the or/and tree, emit it as a branch.
  // Note that all nodes in the tree should have same opcode.
  bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
  if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
      !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
      !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
    emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
                                 InvertCond);
    return;
  }

  // Create TmpBB after CurBB.
  MachineFunction::iterator BBI(CurBB);
  MachineBasicBlock *TmpBB =
      MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
  CurBB->getParent()->insert(++BBI, TmpBB);

  if (Opc == Instruction::Or) {
    // Codegen X | Y as:
    // BB1:
    //   jmp_if_X TBB
    //   jmp TmpBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
    //     = TrueProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
    // A/(1+B) and 2B/(1+B). This choice assumes that
    //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
    // Another choice is to assume TrueProb for BB1 equals to TrueProb for
    // TmpBB, but the math is more complicated.

    auto NewTrueProb = TProb / 2;
    auto NewFalseProb = TProb / 2 + FProb;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
    SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  } else {
    assert(Opc == Instruction::And && "Unknown merge op!");
    // Codegen X & Y as:
    // BB1:
    //   jmp_if_X TmpBB
    //   jmp FBB
    // TmpBB:
    //   jmp_if_Y TBB
    //   jmp FBB
    //
    // This requires creation of TmpBB after CurBB.

    // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
    // The requirement is that
    //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
    //     = FalseProb for original BB.
    // Assuming the original probabilities are A and B, one choice is to set
    // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
    // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
    // TrueProb for BB1 * FalseProb for TmpBB.

    auto NewTrueProb = TProb + FProb / 2;
    auto NewFalseProb = FProb / 2;
    // Emit the LHS condition.
    findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
                         NewFalseProb, InvertCond);

    // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
    SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
    BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
    // Emit the RHS condition into TmpBB.
    findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
                         Probs[1], InvertCond);
  }
}

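// Decide whether a merged condition chain is still worth emitting as separate
// branch blocks; returns false for two-case patterns that will fold into a
// single comparison anyway.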
bool IRTranslator::shouldEmitAsBranches(
    const std::vector<SwitchCG::CaseBlock> &Cases) {
  // For multiple cases, it's better to emit as branches.
  if (Cases.size() != 2)
    return true;

  // If this is two comparisons of the same values or'd or and'd together, they
  // will get folded into a single comparison, so don't emit two blocks.
  if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
       Cases[0].CmpRHS == Cases[1].CmpRHS) ||
      (Cases[0].CmpRHS == Cases[1].CmpLHS &&
       Cases[0].CmpLHS == Cases[1].CmpRHS)) {
    return false;
  }

  // Handle: (X != null) | (Y != null) --> (X|Y) != 0
  // Handle: (X == null) & (Y == null) --> (X|Y) == 0
  if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
      Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
      isa<Constant>(Cases[0].CmpRHS) &&
      cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
        Cases[0].TrueBB == Cases[1].ThisBB)
      return false;
    if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
        Cases[0].FalseBB == Cases[1].ThisBB)
      return false;
  }

  return true;
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  auto &CurMBB = MIRBuilder.getMBB();
  auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));

  if (BrInst.isUnconditional()) {
    // If the unconditional target is the layout successor, fallthrough.
    if (OptLevel == CodeGenOptLevel::None ||
        !CurMBB.isLayoutSuccessor(Succ0MBB))
      MIRBuilder.buildBr(*Succ0MBB);

    // Link successors.
    for (const BasicBlock *Succ : successors(&BrInst))
      CurMBB.addSuccessor(&getMBB(*Succ));
    return true;
  }

  // If this condition is one of the special cases we handle, do special stuff
  // now.
  const Value *CondVal = BrInst.getCondition();
  MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));

  const auto &TLI = *MF->getSubtarget().getTargetLowering();

  // If this is a series of conditions that are or'd or and'd together, emit
  // this as a sequence of branches instead of setcc's with and/or operations.
  // As long as jumps are not expensive (exceptions for multi-use logic ops,
  // unpredictable branches, and vector extracts because those jumps are likely
  // expensive for any target), this should improve performance.
  // For example, instead of something like:
  //     cmp A, B
  //     C = seteq
  //     cmp D, E
  //     F = setle
  //     or C, F
  //     jnz foo
  // Emit:
  //     cmp A, B
  //     je foo
  //     cmp D, E
  //     jle foo
  using namespace PatternMatch;
  const Instruction *CondI = dyn_cast<Instruction>(CondVal);
  if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
      !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
    Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
    Value *Vec;
    const Value *BOp0, *BOp1;
    if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::And;
    else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
      Opcode = Instruction::Or;

    if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
                    match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
      findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
                           getEdgeProbability(&CurMBB, Succ0MBB),
                           getEdgeProbability(&CurMBB, Succ1MBB),
                           /*InvertCond=*/false);
      assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");

      // Allow some cases to be rejected.
      if (shouldEmitAsBranches(SL->SwitchCases)) {
        // Emit the branch for this block.
        emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
        SL->SwitchCases.erase(SL->SwitchCases.begin());
        return true;
      }

      // Okay, we decided not to do this, remove any inserted MBB's and clear
      // SwitchCases.
      for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
        MF->erase(SL->SwitchCases[I].ThisBB);

      SL->SwitchCases.clear();
    }
  }

  // Create a CaseBlock record representing this branch.
  SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
                         ConstantInt::getTrue(MF->getFunction().getContext()),
                         nullptr, Succ0MBB, Succ1MBB, &CurMBB,
                         CurBuilder->getDebugLoc());

  // Use emitSwitchCase to actually insert the fast branch sequence for this
  // cond branch.
  emitSwitchCase(CB, &CurMBB, *CurBuilder);
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (const auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
  SL->findBitTestClusters(Clusters, &SI);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.pop_back_val();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

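// Emit the indirect branch through the jump table. This assumes the jump
// table header has already been lowered, i.e. JT.Reg holds the zero-based
// table index computed by emitJumpTableHeader.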
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table.
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.FallthroughUnreachable) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

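// Emit the compare-and-branch sequence for a single SwitchCG::CaseBlock and
// wire up the corresponding machine CFG edges and branch probabilities.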
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
    // For conditional branch lowering, we might try to do something silly like
    // emit a G_ICMP to compare an existing G_ICMP i1 result with true. If so,
    // just re-use the existing condition vreg.
    if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI && CI->isOne() &&
        CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
      Cond = CondLHS;
    } else {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
        Cond =
            MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
      else
        Cond =
            MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
    }
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info.
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

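// Lower one jump-table cluster: insert the jump block, redistribute edge
// probabilities, and emit the header (range check) immediately when we are
// already lowering the switch block itself.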
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  if (FallthroughUnreachable)
    JTH->FallthroughUnreachable = true;

  if (!JTH->FallthroughUnreachable)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

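// Emit the entry block of a bit-test sequence: subtract the range minimum,
// normalize the switched value to the mask type, and emit the optional range
// check that branches to the default destination.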
void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
                                     MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  // Subtract the minimum value.
  Register SwitchOpReg = getOrCreateVReg(*B.SValue);

  LLT SwitchOpTy = MRI->getType(SwitchOpReg);
  Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
  auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  LLT MaskTy = SwitchOpTy;
  if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
      !llvm::has_single_bit<uint32_t>(MaskTy.getSizeInBits()))
    MaskTy = LLT::scalar(PtrTy.getSizeInBits());
  else {
    // Ensure that the type will fit the mask value.
    for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
      if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
        // Switch table case ranges are encoded into a series of masks.
        // Just use pointer type, it's guaranteed to fit.
        MaskTy = LLT::scalar(PtrTy.getSizeInBits());
        break;
      }
    }
  }
  Register SubReg = RangeSub.getReg(0);
  if (SwitchOpTy != MaskTy)
    SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);

  B.RegVT = getMVTForLLT(MaskTy);
  B.Reg = SubReg;

  MachineBasicBlock *MBB = B.Cases[0].ThisBB;

  if (!B.FallthroughUnreachable)
    addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
  addSuccessorWithProb(SwitchBB, MBB, B.Prob);

  SwitchBB->normalizeSuccProbs();

  if (!B.FallthroughUnreachable) {
    // Conditional branch to the default block.
    auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
    auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
                                  RangeSub, RangeCst);
    MIB.buildBrCond(RangeCmp, *B.Default);
  }

  // Avoid emitting unnecessary branches to the next block.
  if (MBB != SwitchBB->getNextNode())
    MIB.buildBr(*MBB);
}

void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
                                   MachineBasicBlock *NextMBB,
                                   BranchProbability BranchProbToNext,
                                   Register Reg, SwitchCG::BitTestCase &B,
                                   MachineBasicBlock *SwitchBB) {
  MachineIRBuilder &MIB = *CurBuilder;
  MIB.setMBB(*SwitchBB);

  LLT SwitchTy = getLLTForMVT(BB.RegVT);
  Register Cmp;
  unsigned PopCount = llvm::popcount(B.Mask);
  if (PopCount == 1) {
    // Testing for a single bit; just compare the shift count with what it
    // would need to be to shift a 1 bit in that position.
    auto MaskTrailingZeros =
        MIB.buildConstant(SwitchTy, llvm::countr_zero(B.Mask));
    Cmp =
        MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
            .getReg(0);
  } else if (PopCount == BB.Range) {
    // There is only one zero bit in the range, test for it directly.
    auto MaskTrailingOnes =
        MIB.buildConstant(SwitchTy, llvm::countr_one(B.Mask));
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
              .getReg(0);
  } else {
    // Make desired shift.
    auto CstOne = MIB.buildConstant(SwitchTy, 1);
    auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);

    // Emit bit tests and jumps.
    auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
    auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
    auto CstZero = MIB.buildConstant(SwitchTy, 0);
    Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
              .getReg(0);
  }

  // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
  addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
  // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
  addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
  // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
  // one as they are relative probabilities (and thus work more like weights),
  // and hence we need to normalize them to let the sum of them become one.
  SwitchBB->normalizeSuccProbs();

  // Record the fact that the IR edge from the header to the bit test target
  // will go through our new block. Needed for PHIs to have nodes added.
  addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
                    SwitchBB);

  MIB.buildBrCond(Cmp, *B.TargetBB);

  // Avoid emitting unnecessary branches to the next block.
  if (NextMBB != SwitchBB->getNextNode())
    MIB.buildBr(*NextMBB);
}

bool IRTranslator::lowerBitTestWorkItem(
    SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
    MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
    MachineIRBuilder &MIB, MachineFunction::iterator BBI,
    BranchProbability DefaultProb, BranchProbability UnhandledProbs,
    SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
    bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
  // The bit test blocks haven't been inserted yet; insert them here.
  for (BitTestCase &BTC : BTB->Cases)
    CurMF->insert(BBI, BTC.ThisBB);

  // Fill in fields of the BitTestBlock.
  BTB->Parent = CurMBB;
  BTB->Default = Fallthrough;

  BTB->DefaultProb = UnhandledProbs;
  // If the cases in bit test don't form a contiguous range, we evenly
  // distribute the probability on the edge to Fallthrough to two
  // successors of CurMBB.
  if (!BTB->ContiguousRange) {
    BTB->Prob += DefaultProb / 2;
    BTB->DefaultProb -= DefaultProb / 2;
  }

  if (FallthroughUnreachable)
    BTB->FallthroughUnreachable = true;

  // If we're in the right place, emit the bit test header right now.
  if (CurMBB == SwitchMBB) {
    emitBitTestHeader(*BTB, SwitchMBB);
    BTB->Emitted = true;
  }
  return true;
}

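// Process one switch work item: order the clusters by probability (at -O1 and
// above), then emit each cluster as a bit test, jump table, or range
// comparison, chaining newly created fallthrough blocks between clusters.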
bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                DefaultProb, UnhandledProbs, I, Fallthrough,
                                FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
        return false;
      }
      break;
    }

    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst)) {
    // It's legal for indirectbr instructions to have duplicate blocks in the
    // destination list. We don't allow this in MIR. Skip anything that's
    // already a successor.
    if (!AddedSuccessors.insert(Succ).second)
      continue;
    CurBB.addSuccessor(&getMBB(*Succ));
  }

  return true;
}

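// Returns true if V is a swifterror argument or alloca; such values are
// accessed through the SwiftError value tracking below rather than via
// ordinary load/store lowering.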
static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
  if (StoreSize == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());
  AAMDNodes AAInfo = LI.getAAMetadata();

  const Value *Ptr = LI.getPointerOperand();
  Type *OffsetIRTy = DL->getIndexType(Ptr->getType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg =
        SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags =
      TLI.getLoadMemOperandFlags(LI, *DL, AC, LibInfo);
  if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
    if (AA->pointsToConstantMemory(
            MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
      Flags |= MachineMemOperand::MOInvariant;
    }
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Regs[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIndexType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    Align BaseAlign = getMemOpAlign(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, MRI->getType(Vals[i]),
        commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

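// Compute the offset in bits implied by the indices of an extractvalue,
// insertvalue, or GEP-like user, relative to the start of the source
// aggregate (hence the multiplication by 8 below).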
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto *InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  uint32_t Flags = 0;
  if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
    Flags = MachineInstr::copyFlagsFromInstruction(*SI);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
  }

  return true;
}

bool IRTranslator::translateCopy(const User &U, const Value &V,
                                 MachineIRBuilder &MIRBuilder) {
  Register Src = getOrCreateVReg(V);
  auto &Regs = *VMap.getVRegs(U);
  if (Regs.empty()) {
    Regs.push_back(Src);
    VMap.getOffsets(U)->push_back(0);
  } else {
    // If we already assigned a vreg for this instruction, we can't change
    // that. Emit a copy to satisfy the users we already emitted.
    MIRBuilder.buildCopy(Regs[0], Src);
  }
  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    // If the source is a ConstantInt then it was probably created by
    // ConstantHoisting and we should leave it alone.
    if (isa<ConstantInt>(U.getOperand(0)))
      return translateCast(TargetOpcode::G_CONSTANT_FOLD_BARRIER, U,
                           MIRBuilder);
    return translateCopy(U, *U.getOperand(0), MIRBuilder);
  }

  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

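// Lower a GEP into a chain of G_PTR_ADD (and, for non-unit element sizes,
// G_MUL) instructions, accumulating constant offsets so they can be folded
// into a single trailing G_PTR_ADD where possible.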
1493 bool IRTranslator::translateGetElementPtr(const User &U,
1494 MachineIRBuilder &MIRBuilder) {
1495 Value &Op0 = *U.getOperand(0);
1496 Register BaseReg = getOrCreateVReg(Op0);
1497 Type *PtrIRTy = Op0.getType();
1498 LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1499 Type *OffsetIRTy = DL->getIndexType(PtrIRTy);
1500 LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1502 uint32_t Flags = 0;
1503 if (isa<Instruction>(U)) {
1504 const Instruction &I = cast<Instruction>(U);
1505 Flags = MachineInstr::copyFlagsFromInstruction(I);
1508 // Normalize Vector GEP - all scalar operands should be converted to the
1509 // splat vector.
1510 unsigned VectorWidth = 0;
1512 // True if we should use a splat vector; using VectorWidth alone is not
1513 // sufficient.
1514 bool WantSplatVector = false;
1515 if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1516 VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1517 // We don't produce 1 x N vectors; those are treated as scalars.
1518 WantSplatVector = VectorWidth > 1;
1521 // We might need to splat the base pointer into a vector if the offsets
1522 // are vectors.
1523 if (WantSplatVector && !PtrTy.isVector()) {
1524 BaseReg =
1525 MIRBuilder
1526 .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
1527 .getReg(0);
1528 PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1529 PtrTy = getLLTForType(*PtrIRTy, *DL);
1530 OffsetIRTy = DL->getIndexType(PtrIRTy);
1531 OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1534 int64_t Offset = 0;
1535 for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1536 GTI != E; ++GTI) {
1537 const Value *Idx = GTI.getOperand();
1538 if (StructType *StTy = GTI.getStructTypeOrNull()) {
1539 unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1540 Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1541 continue;
1542 } else {
1543 uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1545 // If this is a scalar constant or a splat vector of constants,
1546 // handle it quickly.
1547 if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1548 Offset += ElementSize * CI->getSExtValue();
1549 continue;
1552 if (Offset != 0) {
1553 auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1554 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1555 .getReg(0);
1556 Offset = 0;
1559 Register IdxReg = getOrCreateVReg(*Idx);
1560 LLT IdxTy = MRI->getType(IdxReg);
1561 if (IdxTy != OffsetTy) {
1562 if (!IdxTy.isVector() && WantSplatVector) {
1563 IdxReg = MIRBuilder.buildSplatVector(
1564 OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1567 IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1570 // N = N + Idx * ElementSize;
1571 // Avoid doing it for ElementSize of 1.
1572 Register GepOffsetReg;
1573 if (ElementSize != 1) {
1574 auto ElementSizeMIB = MIRBuilder.buildConstant(
1575 getLLTForType(*OffsetIRTy, *DL), ElementSize);
1576 GepOffsetReg =
1577 MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1578 } else
1579 GepOffsetReg = IdxReg;
1581 BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1585 if (Offset != 0) {
1586 auto OffsetMIB =
1587 MIRBuilder.buildConstant(OffsetTy, Offset);
1589 if (int64_t(Offset) >= 0 && cast<GEPOperator>(U).isInBounds())
1590 Flags |= MachineInstr::MIFlag::NoUWrap;
1592 MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0),
1593 Flags);
1594 return true;
1597 MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1598 return true;
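// Memory intrinsics are lowered to their generic opcodes. Illustrative sketch
// (operand details are simplified):
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %src, i64 %n, i1 false)
// becomes roughly
//   G_MEMCPY %dst(p0), %src(p0), %n(s64), 0   ; trailing imm = tail-call flag
// plus a store MachineMemOperand for the destination and a load one for the
// source carrying alignment, volatility and AA metadata.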
1601 bool IRTranslator::translateMemFunc(const CallInst &CI,
1602 MachineIRBuilder &MIRBuilder,
1603 unsigned Opcode) {
1604 const Value *SrcPtr = CI.getArgOperand(1);
1605 // If the source is undef, then just emit a nop.
1606 if (isa<UndefValue>(SrcPtr))
1607 return true;
1609 SmallVector<Register, 3> SrcRegs;
1611 unsigned MinPtrSize = UINT_MAX;
1612 for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1613 Register SrcReg = getOrCreateVReg(**AI);
1614 LLT SrcTy = MRI->getType(SrcReg);
1615 if (SrcTy.isPointer())
1616 MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1617 SrcRegs.push_back(SrcReg);
1620 LLT SizeTy = LLT::scalar(MinPtrSize);
1622 // The size operand should be the minimum of the pointer sizes.
1623 Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1624 if (MRI->getType(SizeOpReg) != SizeTy)
1625 SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1627 auto ICall = MIRBuilder.buildInstr(Opcode);
1628 for (Register SrcReg : SrcRegs)
1629 ICall.addUse(SrcReg);
1631 Align DstAlign;
1632 Align SrcAlign;
1633 unsigned IsVol =
1634 cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1636 ConstantInt *CopySize = nullptr;
1638 if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1639 DstAlign = MCI->getDestAlign().valueOrOne();
1640 SrcAlign = MCI->getSourceAlign().valueOrOne();
1641 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1642 } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1643 DstAlign = MCI->getDestAlign().valueOrOne();
1644 SrcAlign = MCI->getSourceAlign().valueOrOne();
1645 CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1646 } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1647 DstAlign = MMI->getDestAlign().valueOrOne();
1648 SrcAlign = MMI->getSourceAlign().valueOrOne();
1649 CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1650 } else {
1651 auto *MSI = cast<MemSetInst>(&CI);
1652 DstAlign = MSI->getDestAlign().valueOrOne();
1655 if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1656 // We need to propagate the tail call flag from the IR inst as an argument.
1657 // Otherwise, we have to pessimize and assume later that we cannot tail call
1658 // any memory intrinsics.
1659 ICall.addImm(CI.isTailCall() ? 1 : 0);
1662 // Create mem operands to store the alignment and volatile info.
1663 MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1664 MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1665 if (IsVol) {
1666 LoadFlags |= MachineMemOperand::MOVolatile;
1667 StoreFlags |= MachineMemOperand::MOVolatile;
1670 AAMDNodes AAInfo = CI.getAAMetadata();
1671 if (AA && CopySize &&
1672 AA->pointsToConstantMemory(MemoryLocation(
1673 SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1674 LoadFlags |= MachineMemOperand::MOInvariant;
1676 // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1677 // but the previous usage implied it did. Probably should check
1678 // isDereferenceableAndAlignedPointer.
1679 LoadFlags |= MachineMemOperand::MODereferenceable;
1682 ICall.addMemOperand(
1683 MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1684 StoreFlags, 1, DstAlign, AAInfo));
1685 if (Opcode != TargetOpcode::G_MEMSET)
1686 ICall.addMemOperand(MF->getMachineMemOperand(
1687 MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1689 return true;
1692 void IRTranslator::getStackGuard(Register DstReg,
1693 MachineIRBuilder &MIRBuilder) {
1694 const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1695 MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1696 auto MIB =
1697 MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1699 auto &TLI = *MF->getSubtarget().getTargetLowering();
1700 Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1701 if (!Global)
1702 return;
1704 unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1705 LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1707 MachinePointerInfo MPInfo(Global);
1708 auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1709 MachineMemOperand::MODereferenceable;
1710 MachineMemOperand *MemRef = MF->getMachineMemOperand(
1711 MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1712 MIB.setMemRefs({MemRef});
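// Overflow intrinsics return a struct of {result, overflow bit}, so the call
// has two result vregs. For example (a sketch):
//   %r = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// becomes roughly
//   %val:_(s32), %ovf:_(s1) = G_SADDO %a, %b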
1715 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1716 MachineIRBuilder &MIRBuilder) {
1717 ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1718 MIRBuilder.buildInstr(
1719 Op, {ResRegs[0], ResRegs[1]},
1720 {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1722 return true;
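// Fixed-point intrinsics take the scale as a constant last argument, which is
// emitted as an immediate operand, e.g. (illustrative):
//   %r = call i32 @llvm.smul.fix.i32(i32 %a, i32 %b, i32 4)
// becomes roughly G_SMULFIX %a, %b, 4.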
1725 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1726 MachineIRBuilder &MIRBuilder) {
1727 Register Dst = getOrCreateVReg(CI);
1728 Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1729 Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1730 uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1731   MIRBuilder.buildInstr(Op, {Dst}, {Src0, Src1, Scale});
1732 return true;
1735 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1736 switch (ID) {
1737 default:
1738 break;
1739 case Intrinsic::bswap:
1740 return TargetOpcode::G_BSWAP;
1741 case Intrinsic::bitreverse:
1742 return TargetOpcode::G_BITREVERSE;
1743 case Intrinsic::fshl:
1744 return TargetOpcode::G_FSHL;
1745 case Intrinsic::fshr:
1746 return TargetOpcode::G_FSHR;
1747 case Intrinsic::ceil:
1748 return TargetOpcode::G_FCEIL;
1749 case Intrinsic::cos:
1750 return TargetOpcode::G_FCOS;
1751 case Intrinsic::ctpop:
1752 return TargetOpcode::G_CTPOP;
1753 case Intrinsic::exp:
1754 return TargetOpcode::G_FEXP;
1755 case Intrinsic::exp2:
1756 return TargetOpcode::G_FEXP2;
1757 case Intrinsic::exp10:
1758 return TargetOpcode::G_FEXP10;
1759 case Intrinsic::fabs:
1760 return TargetOpcode::G_FABS;
1761 case Intrinsic::copysign:
1762 return TargetOpcode::G_FCOPYSIGN;
1763 case Intrinsic::minnum:
1764 return TargetOpcode::G_FMINNUM;
1765 case Intrinsic::maxnum:
1766 return TargetOpcode::G_FMAXNUM;
1767 case Intrinsic::minimum:
1768 return TargetOpcode::G_FMINIMUM;
1769 case Intrinsic::maximum:
1770 return TargetOpcode::G_FMAXIMUM;
1771 case Intrinsic::canonicalize:
1772 return TargetOpcode::G_FCANONICALIZE;
1773 case Intrinsic::floor:
1774 return TargetOpcode::G_FFLOOR;
1775 case Intrinsic::fma:
1776 return TargetOpcode::G_FMA;
1777 case Intrinsic::log:
1778 return TargetOpcode::G_FLOG;
1779 case Intrinsic::log2:
1780 return TargetOpcode::G_FLOG2;
1781 case Intrinsic::log10:
1782 return TargetOpcode::G_FLOG10;
1783 case Intrinsic::ldexp:
1784 return TargetOpcode::G_FLDEXP;
1785 case Intrinsic::nearbyint:
1786 return TargetOpcode::G_FNEARBYINT;
1787 case Intrinsic::pow:
1788 return TargetOpcode::G_FPOW;
1789 case Intrinsic::powi:
1790 return TargetOpcode::G_FPOWI;
1791 case Intrinsic::rint:
1792 return TargetOpcode::G_FRINT;
1793 case Intrinsic::round:
1794 return TargetOpcode::G_INTRINSIC_ROUND;
1795 case Intrinsic::roundeven:
1796 return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1797 case Intrinsic::sin:
1798 return TargetOpcode::G_FSIN;
1799 case Intrinsic::sqrt:
1800 return TargetOpcode::G_FSQRT;
1801 case Intrinsic::trunc:
1802 return TargetOpcode::G_INTRINSIC_TRUNC;
1803 case Intrinsic::readcyclecounter:
1804 return TargetOpcode::G_READCYCLECOUNTER;
1805 case Intrinsic::ptrmask:
1806 return TargetOpcode::G_PTRMASK;
1807 case Intrinsic::lrint:
1808 return TargetOpcode::G_INTRINSIC_LRINT;
1809 // FADD/FMUL require checking the FMF, so are handled elsewhere.
1810 case Intrinsic::vector_reduce_fmin:
1811 return TargetOpcode::G_VECREDUCE_FMIN;
1812 case Intrinsic::vector_reduce_fmax:
1813 return TargetOpcode::G_VECREDUCE_FMAX;
1814 case Intrinsic::vector_reduce_fminimum:
1815 return TargetOpcode::G_VECREDUCE_FMINIMUM;
1816 case Intrinsic::vector_reduce_fmaximum:
1817 return TargetOpcode::G_VECREDUCE_FMAXIMUM;
1818 case Intrinsic::vector_reduce_add:
1819 return TargetOpcode::G_VECREDUCE_ADD;
1820 case Intrinsic::vector_reduce_mul:
1821 return TargetOpcode::G_VECREDUCE_MUL;
1822 case Intrinsic::vector_reduce_and:
1823 return TargetOpcode::G_VECREDUCE_AND;
1824 case Intrinsic::vector_reduce_or:
1825 return TargetOpcode::G_VECREDUCE_OR;
1826 case Intrinsic::vector_reduce_xor:
1827 return TargetOpcode::G_VECREDUCE_XOR;
1828 case Intrinsic::vector_reduce_smax:
1829 return TargetOpcode::G_VECREDUCE_SMAX;
1830 case Intrinsic::vector_reduce_smin:
1831 return TargetOpcode::G_VECREDUCE_SMIN;
1832 case Intrinsic::vector_reduce_umax:
1833 return TargetOpcode::G_VECREDUCE_UMAX;
1834 case Intrinsic::vector_reduce_umin:
1835 return TargetOpcode::G_VECREDUCE_UMIN;
1836 case Intrinsic::lround:
1837 return TargetOpcode::G_LROUND;
1838 case Intrinsic::llround:
1839 return TargetOpcode::G_LLROUND;
1840 case Intrinsic::get_fpmode:
1841 return TargetOpcode::G_GET_FPMODE;
1843 return Intrinsic::not_intrinsic;
1846 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1847 Intrinsic::ID ID,
1848 MachineIRBuilder &MIRBuilder) {
1850 unsigned Op = getSimpleIntrinsicOpcode(ID);
1852 // Is this a simple intrinsic?
1853 if (Op == Intrinsic::not_intrinsic)
1854 return false;
1856 // Yes. Let's translate it.
1857 SmallVector<llvm::SrcOp, 4> VRegs;
1858 for (const auto &Arg : CI.args())
1859 VRegs.push_back(getOrCreateVReg(*Arg));
1861 MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1862 MachineInstr::copyFlagsFromInstruction(CI));
1863 return true;
1866 // TODO: Include ConstrainedOps.def when all strict instructions are defined.
1867 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1868 switch (ID) {
1869 case Intrinsic::experimental_constrained_fadd:
1870 return TargetOpcode::G_STRICT_FADD;
1871 case Intrinsic::experimental_constrained_fsub:
1872 return TargetOpcode::G_STRICT_FSUB;
1873 case Intrinsic::experimental_constrained_fmul:
1874 return TargetOpcode::G_STRICT_FMUL;
1875 case Intrinsic::experimental_constrained_fdiv:
1876 return TargetOpcode::G_STRICT_FDIV;
1877 case Intrinsic::experimental_constrained_frem:
1878 return TargetOpcode::G_STRICT_FREM;
1879 case Intrinsic::experimental_constrained_fma:
1880 return TargetOpcode::G_STRICT_FMA;
1881 case Intrinsic::experimental_constrained_sqrt:
1882 return TargetOpcode::G_STRICT_FSQRT;
1883 case Intrinsic::experimental_constrained_ldexp:
1884 return TargetOpcode::G_STRICT_FLDEXP;
1885 default:
1886 return 0;
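// Constrained FP intrinsics are translated to their G_STRICT_* counterparts;
// when the exception behaviour is "ignore", the NoFPExcept flag is set so
// later passes may treat the operation as not raising FP exceptions.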
1890 bool IRTranslator::translateConstrainedFPIntrinsic(
1891 const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1892 fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
1894 unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1895 if (!Opcode)
1896 return false;
1898 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1899 if (EB == fp::ExceptionBehavior::ebIgnore)
1900 Flags |= MachineInstr::NoFPExcept;
1902 SmallVector<llvm::SrcOp, 4> VRegs;
1903 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1904 if (!FPI.isUnaryOp())
1905 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1906 if (FPI.isTernaryOp())
1907 VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1909 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1910 return true;
1913 std::optional<MCRegister> IRTranslator::getArgPhysReg(Argument &Arg) {
1914 auto VRegs = getOrCreateVRegs(Arg);
1915 if (VRegs.size() != 1)
1916 return std::nullopt;
1918 // Arguments are lowered as a copy of a livein physical register.
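  // e.g. %0:_(s64) = COPY $x0 (AArch64 shown only for illustration; the
  // physical register and type are target- and ABI-specific).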
1919 auto *VRegDef = MF->getRegInfo().getVRegDef(VRegs[0]);
1920 if (!VRegDef || !VRegDef->isCopy())
1921 return std::nullopt;
1922 return VRegDef->getOperand(1).getReg().asMCReg();
1925 bool IRTranslator::translateIfEntryValueArgument(const DbgValueInst &DebugInst,
1926 MachineIRBuilder &MIRBuilder) {
1927 auto *Arg = dyn_cast<Argument>(DebugInst.getValue());
1928 if (!Arg)
1929 return false;
1931 const DIExpression *Expr = DebugInst.getExpression();
1932 if (!Expr->isEntryValue())
1933 return false;
1935 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
1936 if (!PhysReg) {
1937 LLVM_DEBUG(dbgs() << "Dropping dbg.value: expression is entry_value but "
1938 "couldn't find a physical register\n"
1939 << DebugInst << "\n");
1940 return true;
1943 MIRBuilder.buildDirectDbgValue(*PhysReg, DebugInst.getVariable(),
1944 DebugInst.getExpression());
1945 return true;
1948 bool IRTranslator::translateIfEntryValueArgument(
1949 const DbgDeclareInst &DebugInst) {
1950 auto *Arg = dyn_cast<Argument>(DebugInst.getAddress());
1951 if (!Arg)
1952 return false;
1954 const DIExpression *Expr = DebugInst.getExpression();
1955 if (!Expr->isEntryValue())
1956 return false;
1958 std::optional<MCRegister> PhysReg = getArgPhysReg(*Arg);
1959 if (!PhysReg)
1960 return false;
1962 // Append an op deref to account for the fact that this is a dbg_declare.
1963 Expr = DIExpression::append(Expr, dwarf::DW_OP_deref);
1964 MF->setVariableDbgInfo(DebugInst.getVariable(), Expr, *PhysReg,
1965 DebugInst.getDebugLoc());
1966 return true;
1969 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1970 MachineIRBuilder &MIRBuilder) {
1971 if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1972 if (ORE->enabled()) {
1973 if (MemoryOpRemark::canHandle(MI, *LibInfo)) {
1974 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
1975 R.visit(MI);
1980   // If this is a simple intrinsic (that is, we just need to add a def of
1981   // a vreg, and uses for each arg operand), then translate it.
1982 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1983 return true;
1985 switch (ID) {
1986 default:
1987 break;
1988 case Intrinsic::lifetime_start:
1989 case Intrinsic::lifetime_end: {
1990 // No stack colouring in O0, discard region information.
1991 if (MF->getTarget().getOptLevel() == CodeGenOptLevel::None)
1992 return true;
1994 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1995 : TargetOpcode::LIFETIME_END;
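    // For example (a sketch): a call to @llvm.lifetime.start.p0 on a static
    // alloca %a becomes LIFETIME_START %stack.<frame index of %a>.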
1997 // Get the underlying objects for the location passed on the lifetime
1998 // marker.
1999 SmallVector<const Value *, 4> Allocas;
2000 getUnderlyingObjects(CI.getArgOperand(1), Allocas);
2002 // Iterate over each underlying object, creating lifetime markers for each
2003 // static alloca. Quit if we find a non-static alloca.
2004 for (const Value *V : Allocas) {
2005 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
2006 if (!AI)
2007 continue;
2009 if (!AI->isStaticAlloca())
2010 return true;
2012 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
2014 return true;
2016 case Intrinsic::dbg_declare: {
2017 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
2018 assert(DI.getVariable() && "Missing variable");
2020 const Value *Address = DI.getAddress();
2021 if (!Address || isa<UndefValue>(Address)) {
2022 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
2023 return true;
2026 assert(DI.getVariable()->isValidLocationForIntrinsic(
2027 MIRBuilder.getDebugLoc()) &&
2028 "Expected inlined-at fields to agree");
2029 auto AI = dyn_cast<AllocaInst>(Address);
2030 if (AI && AI->isStaticAlloca()) {
2031 // Static allocas are tracked at the MF level, no need for DBG_VALUE
2032 // instructions (in fact, they get ignored if they *do* exist).
2033 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
2034 getOrCreateFrameIndex(*AI), DI.getDebugLoc());
2035 return true;
2038 if (translateIfEntryValueArgument(DI))
2039 return true;
2041 // A dbg.declare describes the address of a source variable, so lower it
2042 // into an indirect DBG_VALUE.
2043 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
2044 DI.getVariable(), DI.getExpression());
2045 return true;
2047 case Intrinsic::dbg_label: {
2048 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
2049 assert(DI.getLabel() && "Missing label");
2051 assert(DI.getLabel()->isValidLocationForIntrinsic(
2052 MIRBuilder.getDebugLoc()) &&
2053 "Expected inlined-at fields to agree");
2055 MIRBuilder.buildDbgLabel(DI.getLabel());
2056 return true;
2058 case Intrinsic::vaend:
2059 // No target I know of cares about va_end. Certainly no in-tree target
2060 // does. Simplest intrinsic ever!
2061 return true;
2062 case Intrinsic::vastart: {
2063 auto &TLI = *MF->getSubtarget().getTargetLowering();
2064 Value *Ptr = CI.getArgOperand(0);
2065 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
2067 // FIXME: Get alignment
2068 MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
2069 .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
2070 MachineMemOperand::MOStore,
2071 ListSize, Align(1)));
2072 return true;
2074 case Intrinsic::dbg_value: {
2075 // This form of DBG_VALUE is target-independent.
2076 const DbgValueInst &DI = cast<DbgValueInst>(CI);
2077 const Value *V = DI.getValue();
2078 assert(DI.getVariable()->isValidLocationForIntrinsic(
2079 MIRBuilder.getDebugLoc()) &&
2080 "Expected inlined-at fields to agree");
2081 if (!V || DI.hasArgList()) {
2082 // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
2083 // terminate any prior location.
2084 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
2085 return true;
2087 if (const auto *CI = dyn_cast<Constant>(V)) {
2088 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
2089 return true;
2091 if (auto *AI = dyn_cast<AllocaInst>(V);
2092 AI && AI->isStaticAlloca() && DI.getExpression()->startsWithDeref()) {
2093 // If the value is an alloca and the expression starts with a
2094 // dereference, track a stack slot instead of a register, as registers
2095 // may be clobbered.
2096 auto ExprOperands = DI.getExpression()->getElements();
2097 auto *ExprDerefRemoved =
2098 DIExpression::get(AI->getContext(), ExprOperands.drop_front());
2099 MIRBuilder.buildFIDbgValue(getOrCreateFrameIndex(*AI), DI.getVariable(),
2100 ExprDerefRemoved);
2101 return true;
2103 if (translateIfEntryValueArgument(DI, MIRBuilder))
2104 return true;
2105 for (Register Reg : getOrCreateVRegs(*V)) {
2106 // FIXME: This does not handle register-indirect values at offset 0. The
2107 // direct/indirect thing shouldn't really be handled by something as
2108 // implicit as reg+noreg vs reg+imm in the first place, but it seems
2109 // pretty baked in right now.
2110 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
2112 return true;
2114 case Intrinsic::uadd_with_overflow:
2115 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2116 case Intrinsic::sadd_with_overflow:
2117 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2118 case Intrinsic::usub_with_overflow:
2119 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2120 case Intrinsic::ssub_with_overflow:
2121 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2122 case Intrinsic::umul_with_overflow:
2123 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2124 case Intrinsic::smul_with_overflow:
2125 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2126 case Intrinsic::uadd_sat:
2127 return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2128 case Intrinsic::sadd_sat:
2129 return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2130 case Intrinsic::usub_sat:
2131 return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2132 case Intrinsic::ssub_sat:
2133 return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2134 case Intrinsic::ushl_sat:
2135 return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2136 case Intrinsic::sshl_sat:
2137 return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2138 case Intrinsic::umin:
2139 return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2140 case Intrinsic::umax:
2141 return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2142 case Intrinsic::smin:
2143 return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2144 case Intrinsic::smax:
2145 return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2146 case Intrinsic::abs:
2147 // TODO: Preserve "int min is poison" arg in GMIR?
2148 return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2149 case Intrinsic::smul_fix:
2150 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2151 case Intrinsic::umul_fix:
2152 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2153 case Intrinsic::smul_fix_sat:
2154 return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2155 case Intrinsic::umul_fix_sat:
2156 return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2157 case Intrinsic::sdiv_fix:
2158 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2159 case Intrinsic::udiv_fix:
2160 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2161 case Intrinsic::sdiv_fix_sat:
2162 return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2163 case Intrinsic::udiv_fix_sat:
2164 return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2165 case Intrinsic::fmuladd: {
2166 const TargetMachine &TM = MF->getTarget();
2167 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2168 Register Dst = getOrCreateVReg(CI);
2169 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2170 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2171 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2172 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2173 TLI.isFMAFasterThanFMulAndFAdd(*MF,
2174 TLI.getValueType(*DL, CI.getType()))) {
2175 // TODO: Revisit this to see if we should move this part of the
2176 // lowering to the combiner.
2177 MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2178 MachineInstr::copyFlagsFromInstruction(CI));
2179 } else {
2180 LLT Ty = getLLTForType(*CI.getType(), *DL);
2181 auto FMul = MIRBuilder.buildFMul(
2182 Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2183 MIRBuilder.buildFAdd(Dst, FMul, Op2,
2184 MachineInstr::copyFlagsFromInstruction(CI));
2186 return true;
2188 case Intrinsic::convert_from_fp16:
2189 // FIXME: This intrinsic should probably be removed from the IR.
2190 MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2191 getOrCreateVReg(*CI.getArgOperand(0)),
2192 MachineInstr::copyFlagsFromInstruction(CI));
2193 return true;
2194 case Intrinsic::convert_to_fp16:
2195 // FIXME: This intrinsic should probably be removed from the IR.
2196 MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2197 getOrCreateVReg(*CI.getArgOperand(0)),
2198 MachineInstr::copyFlagsFromInstruction(CI));
2199 return true;
2200 case Intrinsic::frexp: {
2201 ArrayRef<Register> VRegs = getOrCreateVRegs(CI);
2202 MIRBuilder.buildFFrexp(VRegs[0], VRegs[1],
2203 getOrCreateVReg(*CI.getArgOperand(0)),
2204 MachineInstr::copyFlagsFromInstruction(CI));
2205 return true;
2207 case Intrinsic::memcpy_inline:
2208 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2209 case Intrinsic::memcpy:
2210 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2211 case Intrinsic::memmove:
2212 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2213 case Intrinsic::memset:
2214 return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2215 case Intrinsic::eh_typeid_for: {
2216 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2217 Register Reg = getOrCreateVReg(CI);
2218 unsigned TypeID = MF->getTypeIDFor(GV);
2219 MIRBuilder.buildConstant(Reg, TypeID);
2220 return true;
2222 case Intrinsic::objectsize:
2223 llvm_unreachable("llvm.objectsize.* should have been lowered already");
2225 case Intrinsic::is_constant:
2226 llvm_unreachable("llvm.is.constant.* should have been lowered already");
2228 case Intrinsic::stackguard:
2229 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2230 return true;
2231 case Intrinsic::stackprotector: {
2232 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2233 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2234 Register GuardVal;
2235 if (TLI.useLoadStackGuardNode()) {
2236 GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2237 getStackGuard(GuardVal, MIRBuilder);
2238 } else
2239 GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2241 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2242 int FI = getOrCreateFrameIndex(*Slot);
2243 MF->getFrameInfo().setStackProtectorIndex(FI);
2245 MIRBuilder.buildStore(
2246 GuardVal, getOrCreateVReg(*Slot),
2247 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2248 MachineMemOperand::MOStore |
2249 MachineMemOperand::MOVolatile,
2250 PtrTy, Align(8)));
2251 return true;
2253 case Intrinsic::stacksave: {
2254 MIRBuilder.buildInstr(TargetOpcode::G_STACKSAVE, {getOrCreateVReg(CI)}, {});
2255 return true;
2257 case Intrinsic::stackrestore: {
2258 MIRBuilder.buildInstr(TargetOpcode::G_STACKRESTORE, {},
2259 {getOrCreateVReg(*CI.getArgOperand(0))});
2260 return true;
2262 case Intrinsic::cttz:
2263 case Intrinsic::ctlz: {
2264 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2265 bool isTrailing = ID == Intrinsic::cttz;
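    // The second argument is the "is zero poison" flag: when it is nonzero, a
    // zero input gives an undefined result, which maps to the *_ZERO_UNDEF
    // variants; otherwise the fully defined G_CTTZ/G_CTLZ are used.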
2266 unsigned Opcode = isTrailing
2267 ? Cst->isZero() ? TargetOpcode::G_CTTZ
2268 : TargetOpcode::G_CTTZ_ZERO_UNDEF
2269 : Cst->isZero() ? TargetOpcode::G_CTLZ
2270 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2271 MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2272 {getOrCreateVReg(*CI.getArgOperand(0))});
2273 return true;
2275 case Intrinsic::invariant_start: {
2276 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2277 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2278 MIRBuilder.buildUndef(Undef);
2279 return true;
2281 case Intrinsic::invariant_end:
2282 return true;
2283 case Intrinsic::expect:
2284 case Intrinsic::annotation:
2285 case Intrinsic::ptr_annotation:
2286 case Intrinsic::launder_invariant_group:
2287 case Intrinsic::strip_invariant_group: {
2288 // Drop the intrinsic, but forward the value.
2289 MIRBuilder.buildCopy(getOrCreateVReg(CI),
2290 getOrCreateVReg(*CI.getArgOperand(0)));
2291 return true;
2293 case Intrinsic::assume:
2294 case Intrinsic::experimental_noalias_scope_decl:
2295 case Intrinsic::var_annotation:
2296 case Intrinsic::sideeffect:
2297 // Discard annotate attributes, assumptions, and artificial side-effects.
2298 return true;
2299 case Intrinsic::read_volatile_register:
2300 case Intrinsic::read_register: {
2301 Value *Arg = CI.getArgOperand(0);
2302 MIRBuilder
2303 .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2304 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2305 return true;
2307 case Intrinsic::write_register: {
2308 Value *Arg = CI.getArgOperand(0);
2309 MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2310 .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2311 .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2312 return true;
2314 case Intrinsic::localescape: {
2315 MachineBasicBlock &EntryMBB = MF->front();
2316 StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2318 // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2319 // is the same on all targets.
2320 for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2321 Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2322 if (isa<ConstantPointerNull>(Arg))
2323 continue; // Skip null pointers. They represent a hole in index space.
2325 int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2326 MCSymbol *FrameAllocSym =
2327 MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2328 Idx);
2330 // This should be inserted at the start of the entry block.
2331 auto LocalEscape =
2332 MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2333 .addSym(FrameAllocSym)
2334 .addFrameIndex(FI);
2336 EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2339 return true;
2341 case Intrinsic::vector_reduce_fadd:
2342 case Intrinsic::vector_reduce_fmul: {
2343 // Need to check for the reassoc flag to decide whether we want a
2344 // sequential reduction opcode or not.
2345 Register Dst = getOrCreateVReg(CI);
2346 Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2347 Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2348 unsigned Opc = 0;
2349 if (!CI.hasAllowReassoc()) {
2350 // The sequential ordering case.
2351 Opc = ID == Intrinsic::vector_reduce_fadd
2352 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2353 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2354 MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2355 MachineInstr::copyFlagsFromInstruction(CI));
2356 return true;
2358 // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2359 // since the associativity doesn't matter.
2360 unsigned ScalarOpc;
2361 if (ID == Intrinsic::vector_reduce_fadd) {
2362 Opc = TargetOpcode::G_VECREDUCE_FADD;
2363 ScalarOpc = TargetOpcode::G_FADD;
2364 } else {
2365 Opc = TargetOpcode::G_VECREDUCE_FMUL;
2366 ScalarOpc = TargetOpcode::G_FMUL;
2368 LLT DstTy = MRI->getType(Dst);
2369 auto Rdx = MIRBuilder.buildInstr(
2370 Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2371 MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2372 MachineInstr::copyFlagsFromInstruction(CI));
2374 return true;
2376 case Intrinsic::trap:
2377 case Intrinsic::debugtrap:
2378 case Intrinsic::ubsantrap: {
2379 StringRef TrapFuncName =
2380 CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
2381 if (TrapFuncName.empty())
2382 break; // Use the default handling.
2383 CallLowering::CallLoweringInfo Info;
2384 if (ID == Intrinsic::ubsantrap) {
2385 Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
2386 CI.getArgOperand(0)->getType(), 0});
2388 Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
2389 Info.CB = &CI;
2390 Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
2391 return CLI->lowerCall(MIRBuilder, Info);
2393 case Intrinsic::fptrunc_round: {
2394 uint32_t Flags = MachineInstr::copyFlagsFromInstruction(CI);
2396 // Convert the metadata argument to a constant integer
2397 Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2398 std::optional<RoundingMode> RoundMode =
2399 convertStrToRoundingMode(cast<MDString>(MD)->getString());
2401     // Add the rounding mode as an immediate operand.
2402 MIRBuilder
2403 .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2404 {getOrCreateVReg(CI)},
2405 {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2406 .addImm((int)*RoundMode);
2408 return true;
2410 case Intrinsic::is_fpclass: {
2411 Value *FpValue = CI.getOperand(0);
2412 ConstantInt *TestMaskValue = cast<ConstantInt>(CI.getOperand(1));
2414 MIRBuilder
2415 .buildInstr(TargetOpcode::G_IS_FPCLASS, {getOrCreateVReg(CI)},
2416 {getOrCreateVReg(*FpValue)})
2417 .addImm(TestMaskValue->getZExtValue());
2419 return true;
2421 case Intrinsic::set_fpmode: {
2422 Value *FPState = CI.getOperand(0);
2423     MIRBuilder.buildInstr(TargetOpcode::G_SET_FPMODE, {},
2424                           {getOrCreateVReg(*FPState)});
2425 return true;
2427 case Intrinsic::reset_fpmode: {
2428 MIRBuilder.buildInstr(TargetOpcode::G_RESET_FPMODE, {}, {});
2429 return true;
2431 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
2432 case Intrinsic::INTRINSIC:
2433 #include "llvm/IR/ConstrainedOps.def"
2434 return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2435 MIRBuilder);
2438 return false;
2441 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2442 MachineIRBuilder &MIRBuilder) {
2444 const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2446 if (!ALI) {
2447 LLVM_DEBUG(
2448 dbgs() << "Inline asm lowering is not supported for this target yet\n");
2449 return false;
2452 return ALI->lowerInlineAsm(
2453 MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2456 bool IRTranslator::translateCallBase(const CallBase &CB,
2457 MachineIRBuilder &MIRBuilder) {
2458 ArrayRef<Register> Res = getOrCreateVRegs(CB);
2460 SmallVector<ArrayRef<Register>, 8> Args;
2461 Register SwiftInVReg = 0;
2462 Register SwiftErrorVReg = 0;
2463 for (const auto &Arg : CB.args()) {
2464 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2465 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2466 LLT Ty = getLLTForType(*Arg->getType(), *DL);
2467 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2468 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2469 &CB, &MIRBuilder.getMBB(), Arg));
2470 Args.emplace_back(ArrayRef(SwiftInVReg));
2471 SwiftErrorVReg =
2472 SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2473 continue;
2475 Args.push_back(getOrCreateVRegs(*Arg));
2478 if (auto *CI = dyn_cast<CallInst>(&CB)) {
2479 if (ORE->enabled()) {
2480 if (MemoryOpRemark::canHandle(CI, *LibInfo)) {
2481 MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, *LibInfo);
2482 R.visit(CI);
2487 // We don't set HasCalls on MFI here yet because call lowering may decide to
2488 // optimize into tail calls. Instead, we defer that to selection where a final
2489 // scan is done to check if any instructions are calls.
2490 bool Success =
2491 CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2492 [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2494 // Check if we just inserted a tail call.
2495 if (Success) {
2496 assert(!HasTailCall && "Can't tail call return twice from block?");
2497 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2498 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2501 return Success;
2504 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2505 const CallInst &CI = cast<CallInst>(U);
2506 auto TII = MF->getTarget().getIntrinsicInfo();
2507 const Function *F = CI.getCalledFunction();
2509 // FIXME: support Windows dllimport function calls and calls through
2510 // weak symbols.
2511 if (F && (F->hasDLLImportStorageClass() ||
2512 (MF->getTarget().getTargetTriple().isOSWindows() &&
2513 F->hasExternalWeakLinkage())))
2514 return false;
2516 // FIXME: support control flow guard targets.
2517 if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2518 return false;
2520 // FIXME: support statepoints and related.
2521 if (isa<GCStatepointInst, GCRelocateInst, GCResultInst>(U))
2522 return false;
2524 if (CI.isInlineAsm())
2525 return translateInlineAsm(CI, MIRBuilder);
2527 diagnoseDontCall(CI);
2529 Intrinsic::ID ID = Intrinsic::not_intrinsic;
2530 if (F && F->isIntrinsic()) {
2531 ID = F->getIntrinsicID();
2532 if (TII && ID == Intrinsic::not_intrinsic)
2533 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2536 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2537 return translateCallBase(CI, MIRBuilder);
2539 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2541 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2542 return true;
2544 ArrayRef<Register> ResultRegs;
2545 if (!CI.getType()->isVoidTy())
2546 ResultRegs = getOrCreateVRegs(CI);
2548 // Ignore the callsite attributes. Backend code is most likely not expecting
2549 // an intrinsic to sometimes have side effects and sometimes not.
2550 MachineInstrBuilder MIB = MIRBuilder.buildIntrinsic(ID, ResultRegs);
2551 if (isa<FPMathOperator>(CI))
2552 MIB->copyIRFlags(CI);
2554 for (const auto &Arg : enumerate(CI.args())) {
2555 // If this is required to be an immediate, don't materialize it in a
2556 // register.
2557 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2558 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2559 // imm arguments are more convenient than cimm (and realistically
2560 // probably sufficient), so use them.
2561 assert(CI->getBitWidth() <= 64 &&
2562 "large intrinsic immediates not handled");
2563 MIB.addImm(CI->getSExtValue());
2564 } else {
2565 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2567 } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2568 auto *MD = MDVal->getMetadata();
2569 auto *MDN = dyn_cast<MDNode>(MD);
2570 if (!MDN) {
2571 if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2572 MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2573 else // This was probably an MDString.
2574 return false;
2576 MIB.addMetadata(MDN);
2577 } else {
2578 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2579 if (VRegs.size() > 1)
2580 return false;
2581 MIB.addUse(VRegs[0]);
2585 // Add a MachineMemOperand if it is a target mem intrinsic.
2586 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2587 TargetLowering::IntrinsicInfo Info;
2588 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2589 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2590 Align Alignment = Info.align.value_or(
2591 DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2592 LLT MemTy = Info.memVT.isSimple()
2593 ? getLLTForMVT(Info.memVT.getSimpleVT())
2594 : LLT::scalar(Info.memVT.getStoreSizeInBits());
2596     // TODO: We currently just fall back to address space 0 if getTgtMemIntrinsic
2597     // didn't yield anything useful.
2598 MachinePointerInfo MPI;
2599 if (Info.ptrVal)
2600 MPI = MachinePointerInfo(Info.ptrVal, Info.offset);
2601 else if (Info.fallbackAddressSpace)
2602 MPI = MachinePointerInfo(*Info.fallbackAddressSpace);
2603 MIB.addMemOperand(
2604 MF->getMachineMemOperand(MPI, Info.flags, MemTy, Alignment, CI.getAAMetadata()));
2607 return true;
2610 bool IRTranslator::findUnwindDestinations(
2611 const BasicBlock *EHPadBB,
2612 BranchProbability Prob,
2613 SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2614 &UnwindDests) {
2615 EHPersonality Personality = classifyEHPersonality(
2616 EHPadBB->getParent()->getFunction().getPersonalityFn());
2617 bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2618 bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2619 bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2620 bool IsSEH = isAsynchronousEHPersonality(Personality);
2622 if (IsWasmCXX) {
2623 // Ignore this for now.
2624 return false;
2627 while (EHPadBB) {
2628 const Instruction *Pad = EHPadBB->getFirstNonPHI();
2629 BasicBlock *NewEHPadBB = nullptr;
2630 if (isa<LandingPadInst>(Pad)) {
2631 // Stop on landingpads. They are not funclets.
2632 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2633 break;
2635 if (isa<CleanupPadInst>(Pad)) {
2636 // Stop on cleanup pads. Cleanups are always funclet entries for all known
2637 // personalities.
2638 UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2639 UnwindDests.back().first->setIsEHScopeEntry();
2640 UnwindDests.back().first->setIsEHFuncletEntry();
2641 break;
2643 if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2644 // Add the catchpad handlers to the possible destinations.
2645 for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2646 UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2647 // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2648 if (IsMSVCCXX || IsCoreCLR)
2649 UnwindDests.back().first->setIsEHFuncletEntry();
2650 if (!IsSEH)
2651 UnwindDests.back().first->setIsEHScopeEntry();
2653 NewEHPadBB = CatchSwitch->getUnwindDest();
2654 } else {
2655 continue;
2658 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2659 if (BPI && NewEHPadBB)
2660 Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2661 EHPadBB = NewEHPadBB;
2663 return true;
2666 bool IRTranslator::translateInvoke(const User &U,
2667 MachineIRBuilder &MIRBuilder) {
2668 const InvokeInst &I = cast<InvokeInst>(U);
2669 MCContext &Context = MF->getContext();
2671 const BasicBlock *ReturnBB = I.getSuccessor(0);
2672 const BasicBlock *EHPadBB = I.getSuccessor(1);
2674 const Function *Fn = I.getCalledFunction();
2676 // FIXME: support invoking patchpoint and statepoint intrinsics.
2677 if (Fn && Fn->isIntrinsic())
2678 return false;
2680 // FIXME: support whatever these are.
2681 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2682 return false;
2684 // FIXME: support control flow guard targets.
2685 if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2686 return false;
2688 // FIXME: support Windows exception handling.
2689 if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2690 return false;
2692 // FIXME: support Windows dllimport function calls and calls through
2693 // weak symbols.
2694 if (Fn && (Fn->hasDLLImportStorageClass() ||
2695 (MF->getTarget().getTargetTriple().isOSWindows() &&
2696 Fn->hasExternalWeakLinkage())))
2697 return false;
2699 bool LowerInlineAsm = I.isInlineAsm();
2700 bool NeedEHLabel = true;
2702 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2703 // the region covered by the try.
2704 MCSymbol *BeginSymbol = nullptr;
2705 if (NeedEHLabel) {
2706 MIRBuilder.buildInstr(TargetOpcode::G_INVOKE_REGION_START);
2707 BeginSymbol = Context.createTempSymbol();
2708 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2711 if (LowerInlineAsm) {
2712 if (!translateInlineAsm(I, MIRBuilder))
2713 return false;
2714 } else if (!translateCallBase(I, MIRBuilder))
2715 return false;
2717 MCSymbol *EndSymbol = nullptr;
2718 if (NeedEHLabel) {
2719 EndSymbol = Context.createTempSymbol();
2720 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2723 SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2724 BranchProbabilityInfo *BPI = FuncInfo.BPI;
2725 MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2726 BranchProbability EHPadBBProb =
2727 BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2728 : BranchProbability::getZero();
2730 if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2731 return false;
2733 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2734 &ReturnMBB = getMBB(*ReturnBB);
2735 // Update successor info.
2736 addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2737 for (auto &UnwindDest : UnwindDests) {
2738 UnwindDest.first->setIsEHPad();
2739 addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2741 InvokeMBB->normalizeSuccProbs();
2743 if (NeedEHLabel) {
2744 assert(BeginSymbol && "Expected a begin symbol!");
2745 assert(EndSymbol && "Expected an end symbol!");
2746 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2749 MIRBuilder.buildBr(ReturnMBB);
2750 return true;
2753 bool IRTranslator::translateCallBr(const User &U,
2754 MachineIRBuilder &MIRBuilder) {
2755 // FIXME: Implement this.
2756 return false;
2759 bool IRTranslator::translateLandingPad(const User &U,
2760 MachineIRBuilder &MIRBuilder) {
2761 const LandingPadInst &LP = cast<LandingPadInst>(U);
2763 MachineBasicBlock &MBB = MIRBuilder.getMBB();
2765 MBB.setIsEHPad();
2767 // If there aren't registers to copy the values into (e.g., during SjLj
2768 // exceptions), then don't bother.
2769 auto &TLI = *MF->getSubtarget().getTargetLowering();
2770 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2771 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2772 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2773 return true;
2775 // If landingpad's return type is token type, we don't create DAG nodes
2776 // for its exception pointer and selector value. The extraction of exception
2777 // pointer or selector value from token type landingpads is not currently
2778 // supported.
2779 if (LP.getType()->isTokenTy())
2780 return true;
2782 // Add a label to mark the beginning of the landing pad. Deletion of the
2783 // landing pad can thus be detected via the MachineModuleInfo.
2784 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2785 .addSym(MF->addLandingPad(&MBB));
2787 // If the unwinder does not preserve all registers, ensure that the
2788 // function marks the clobbered registers as used.
2789 const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2790 if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2791 MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2793 LLT Ty = getLLTForType(*LP.getType(), *DL);
2794 Register Undef = MRI->createGenericVirtualRegister(Ty);
2795 MIRBuilder.buildUndef(Undef);
2797 SmallVector<LLT, 2> Tys;
2798 for (Type *Ty : cast<StructType>(LP.getType())->elements())
2799 Tys.push_back(getLLTForType(*Ty, *DL));
2800 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2802 // Mark exception register as live in.
2803 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2804 if (!ExceptionReg)
2805 return false;
2807 MBB.addLiveIn(ExceptionReg);
2808 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2809 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2811 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2812 if (!SelectorReg)
2813 return false;
2815 MBB.addLiveIn(SelectorReg);
2816 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2817 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2818 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2820 return true;
2823 bool IRTranslator::translateAlloca(const User &U,
2824 MachineIRBuilder &MIRBuilder) {
2825 auto &AI = cast<AllocaInst>(U);
2827 if (AI.isSwiftError())
2828 return true;
2830 if (AI.isStaticAlloca()) {
2831 Register Res = getOrCreateVReg(AI);
2832 int FI = getOrCreateFrameIndex(AI);
2833 MIRBuilder.buildFrameIndex(Res, FI);
2834 return true;
2837 // FIXME: support stack probing for Windows.
2838 if (MF->getTarget().getTargetTriple().isOSWindows())
2839 return false;
2841 // Now we're in the harder dynamic case.
2842 Register NumElts = getOrCreateVReg(*AI.getArraySize());
2843 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2844 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2845 if (MRI->getType(NumElts) != IntPtrTy) {
2846 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2847 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2848 NumElts = ExtElts;
2851 Type *Ty = AI.getAllocatedType();
2853 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2854 Register TySize =
2855 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2856 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2858   // Round the size of the allocation up to the stack alignment size
2859   // by adding SA-1 to the size. This doesn't overflow because we're computing
2860   // an address inside an alloca.
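  // i.e. AlignedSize = (AllocSize + StackAlign - 1) & ~(StackAlign - 1).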
2861 Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2862 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2863 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2864 MachineInstr::NoUWrap);
2865 auto AlignCst =
2866 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2867 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
2869 Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2870 if (Alignment <= StackAlign)
2871 Alignment = Align(1);
2872 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2874 MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2875 assert(MF->getFrameInfo().hasVarSizedObjects());
2876 return true;
2879 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2880 // FIXME: We may need more info about the type. Because of how LLT works,
2881 // we're completely discarding the i64/double distinction here (amongst
2882 // others). Fortunately the ABIs I know of where that matters don't use va_arg
2883 // anyway but that's not guaranteed.
2884 MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2885 {getOrCreateVReg(*U.getOperand(0)),
2886 DL->getABITypeAlign(U.getType()).value()});
2887 return true;
2890 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
2891 if (!MF->getTarget().Options.TrapUnreachable)
2892 return true;
2894 auto &UI = cast<UnreachableInst>(U);
2895 // We may be able to ignore unreachable behind a noreturn call.
2896 if (MF->getTarget().Options.NoTrapAfterNoreturn) {
2897 const BasicBlock &BB = *UI.getParent();
2898 if (&UI != &BB.front()) {
2899 BasicBlock::const_iterator PredI =
2900 std::prev(BasicBlock::const_iterator(UI));
2901 if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2902 if (Call->doesNotReturn())
2903 return true;
2908 MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>());
2909 return true;
2912 bool IRTranslator::translateInsertElement(const User &U,
2913 MachineIRBuilder &MIRBuilder) {
2914 // If it is a <1 x Ty> vector, use the scalar as it is
2915 // not a legal vector type in LLT.
2916 if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2917 return translateCopy(U, *U.getOperand(1), MIRBuilder);
2919 Register Res = getOrCreateVReg(U);
2920 Register Val = getOrCreateVReg(*U.getOperand(0));
2921 Register Elt = getOrCreateVReg(*U.getOperand(1));
2922 Register Idx = getOrCreateVReg(*U.getOperand(2));
2923 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2924 return true;
2927 bool IRTranslator::translateExtractElement(const User &U,
2928 MachineIRBuilder &MIRBuilder) {
2929 // If it is a <1 x Ty> vector, use the scalar as it is
2930 // not a legal vector type in LLT.
2931 if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2932 return translateCopy(U, *U.getOperand(0), MIRBuilder);
2934 Register Res = getOrCreateVReg(U);
2935 Register Val = getOrCreateVReg(*U.getOperand(0));
2936 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2937 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
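  // Normalize the index to the target's preferred vector-index width:
  // constant indices are rebuilt at that width, variable ones are
  // zero-extended or truncated below.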
2938 Register Idx;
2939 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2940 if (CI->getBitWidth() != PreferredVecIdxWidth) {
2941 APInt NewIdx = CI->getValue().zextOrTrunc(PreferredVecIdxWidth);
2942 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2943 Idx = getOrCreateVReg(*NewIdxCI);
2946 if (!Idx)
2947 Idx = getOrCreateVReg(*U.getOperand(1));
2948 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2949 const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2950 Idx = MIRBuilder.buildZExtOrTrunc(VecIdxTy, Idx).getReg(0);
2952 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2953 return true;
2956 bool IRTranslator::translateShuffleVector(const User &U,
2957 MachineIRBuilder &MIRBuilder) {
2958 ArrayRef<int> Mask;
2959 if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2960 Mask = SVI->getShuffleMask();
2961 else
2962 Mask = cast<ConstantExpr>(U).getShuffleMask();
2963 ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2964 MIRBuilder
2965 .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2966 {getOrCreateVReg(*U.getOperand(0)),
2967 getOrCreateVReg(*U.getOperand(1))})
2968 .addShuffleMask(MaskAlloc);
2969 return true;
2972 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2973 const PHINode &PI = cast<PHINode>(U);
2975 SmallVector<MachineInstr *, 4> Insts;
2976 for (auto Reg : getOrCreateVRegs(PI)) {
2977 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2978 Insts.push_back(MIB.getInstr());
2981 PendingPHIs.emplace_back(&PI, std::move(Insts));
2982 return true;
2985 bool IRTranslator::translateAtomicCmpXchg(const User &U,
2986 MachineIRBuilder &MIRBuilder) {
2987 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2989 auto &TLI = *MF->getSubtarget().getTargetLowering();
2990 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2992 auto Res = getOrCreateVRegs(I);
2993 Register OldValRes = Res[0];
2994 Register SuccessRes = Res[1];
2995 Register Addr = getOrCreateVReg(*I.getPointerOperand());
2996 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2997 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2999 MIRBuilder.buildAtomicCmpXchgWithSuccess(
3000 OldValRes, SuccessRes, Addr, Cmp, NewVal,
3001 *MF->getMachineMemOperand(
3002 MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
3003 getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3004 I.getSuccessOrdering(), I.getFailureOrdering()));
3005 return true;
3008 bool IRTranslator::translateAtomicRMW(const User &U,
3009 MachineIRBuilder &MIRBuilder) {
3010 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
3011 auto &TLI = *MF->getSubtarget().getTargetLowering();
3012 auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
3014 Register Res = getOrCreateVReg(I);
3015 Register Addr = getOrCreateVReg(*I.getPointerOperand());
3016 Register Val = getOrCreateVReg(*I.getValOperand());
3018 unsigned Opcode = 0;
3019 switch (I.getOperation()) {
3020 default:
3021 return false;
3022 case AtomicRMWInst::Xchg:
3023 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
3024 break;
3025 case AtomicRMWInst::Add:
3026 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
3027 break;
3028 case AtomicRMWInst::Sub:
3029 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
3030 break;
3031 case AtomicRMWInst::And:
3032 Opcode = TargetOpcode::G_ATOMICRMW_AND;
3033 break;
3034 case AtomicRMWInst::Nand:
3035 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
3036 break;
3037 case AtomicRMWInst::Or:
3038 Opcode = TargetOpcode::G_ATOMICRMW_OR;
3039 break;
3040 case AtomicRMWInst::Xor:
3041 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
3042 break;
3043 case AtomicRMWInst::Max:
3044 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
3045 break;
3046 case AtomicRMWInst::Min:
3047 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
3048 break;
3049 case AtomicRMWInst::UMax:
3050 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
3051 break;
3052 case AtomicRMWInst::UMin:
3053 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
3054 break;
3055 case AtomicRMWInst::FAdd:
3056 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
3057 break;
3058 case AtomicRMWInst::FSub:
3059 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
3060 break;
3061 case AtomicRMWInst::FMax:
3062 Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
3063 break;
3064 case AtomicRMWInst::FMin:
3065 Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
3066 break;
3067 case AtomicRMWInst::UIncWrap:
3068 Opcode = TargetOpcode::G_ATOMICRMW_UINC_WRAP;
3069 break;
3070 case AtomicRMWInst::UDecWrap:
3071 Opcode = TargetOpcode::G_ATOMICRMW_UDEC_WRAP;
3072 break;
3075 MIRBuilder.buildAtomicRMW(
3076 Opcode, Res, Addr, Val,
3077 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
3078 Flags, MRI->getType(Val), getMemOpAlign(I),
3079 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
3080 I.getOrdering()));
3081 return true;
3084 bool IRTranslator::translateFence(const User &U,
3085 MachineIRBuilder &MIRBuilder) {
3086 const FenceInst &Fence = cast<FenceInst>(U);
3087 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
3088 Fence.getSyncScopeID());
3089 return true;
3092 bool IRTranslator::translateFreeze(const User &U,
3093 MachineIRBuilder &MIRBuilder) {
3094 const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
3095 const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
3097 assert(DstRegs.size() == SrcRegs.size() &&
3098 "Freeze with different source and destination type?");
3100 for (unsigned I = 0; I < DstRegs.size(); ++I) {
3101 MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
3104 return true;
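// PHI operands may refer to values defined in blocks that have not been
// translated yet, so translatePHI only creates empty G_PHIs (one per leaf
// vreg). Once all blocks exist, this fills in the (vreg, predecessor MBB)
// operand pairs, skipping machine predecessors that were already seen.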
3107 void IRTranslator::finishPendingPhis() {
3108 #ifndef NDEBUG
3109 DILocationVerifier Verifier;
3110 GISelObserverWrapper WrapperObserver(&Verifier);
3111 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3112 #endif // ifndef NDEBUG
3113 for (auto &Phi : PendingPHIs) {
3114 const PHINode *PI = Phi.first;
3115 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
3116 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
3117 EntryBuilder->setDebugLoc(PI->getDebugLoc());
3118 #ifndef NDEBUG
3119 Verifier.setCurrentInst(PI);
3120 #endif // ifndef NDEBUG
3122 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
3123 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
3124 auto IRPred = PI->getIncomingBlock(i);
3125 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
3126 for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
3127 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
3128 continue;
3129 SeenPreds.insert(Pred);
3130 for (unsigned j = 0; j < ValRegs.size(); ++j) {
3131 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
3132 MIB.addUse(ValRegs[j]);
3133 MIB.addMBB(Pred);
3134 }
3135 }
3136 }
3137 }
3138 }
3140 bool IRTranslator::translate(const Instruction &Inst) {
3141 CurBuilder->setDebugLoc(Inst.getDebugLoc());
3142 CurBuilder->setPCSections(Inst.getMetadata(LLVMContext::MD_pcsections));
3144 auto &TLI = *MF->getSubtarget().getTargetLowering();
3145 if (TLI.fallBackToDAGISel(Inst))
3146 return false;
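// Dispatch to the translate##OPCODE helpers generated from Instruction.def;
// any opcode without a helper fails translation.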
3148 switch (Inst.getOpcode()) {
3149 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3150 case Instruction::OPCODE: \
3151 return translate##OPCODE(Inst, *CurBuilder.get());
3152 #include "llvm/IR/Instruction.def"
3153 default:
3154 return false;
3155 }
3156 }
3158 bool IRTranslator::translate(const Constant &C, Register Reg) {
3159 // We only emit constants into the entry block from here. To prevent jumpy
3160 // debug behaviour, remove the debug line.
3161 if (auto CurrInstDL = CurBuilder->getDL())
3162 EntryBuilder->setDebugLoc(DebugLoc());
3164 if (auto CI = dyn_cast<ConstantInt>(&C))
3165 EntryBuilder->buildConstant(Reg, *CI);
3166 else if (auto CF = dyn_cast<ConstantFP>(&C))
3167 EntryBuilder->buildFConstant(Reg, *CF);
3168 else if (isa<UndefValue>(C))
3169 EntryBuilder->buildUndef(Reg);
3170 else if (isa<ConstantPointerNull>(C))
3171 EntryBuilder->buildConstant(Reg, 0);
3172 else if (auto GV = dyn_cast<GlobalValue>(&C))
3173 EntryBuilder->buildGlobalValue(Reg, GV);
3174 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3175 if (!isa<FixedVectorType>(CAZ->getType()))
3176 return false;
3177 // Return the scalar if it is a <1 x Ty> vector.
3178 unsigned NumElts = CAZ->getElementCount().getFixedValue();
3179 if (NumElts == 1)
3180 return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
3181 SmallVector<Register, 4> Ops;
3182 for (unsigned I = 0; I < NumElts; ++I) {
3183 Constant &Elt = *CAZ->getElementValue(I);
3184 Ops.push_back(getOrCreateVReg(Elt));
3185 }
3186 EntryBuilder->buildBuildVector(Reg, Ops);
3187 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3188 // Return the scalar if it is a <1 x Ty> vector.
3189 if (CV->getNumElements() == 1)
3190 return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3191 SmallVector<Register, 4> Ops;
3192 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3193 Constant &Elt = *CV->getElementAsConstant(i);
3194 Ops.push_back(getOrCreateVReg(Elt));
3195 }
3196 EntryBuilder->buildBuildVector(Reg, Ops);
3197 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3198 switch(CE->getOpcode()) {
3199 #define HANDLE_INST(NUM, OPCODE, CLASS) \
3200 case Instruction::OPCODE: \
3201 return translate##OPCODE(*CE, *EntryBuilder.get());
3202 #include "llvm/IR/Instruction.def"
3203 default:
3204 return false;
3205 }
3206 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3207 if (CV->getNumOperands() == 1)
3208 return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3209 SmallVector<Register, 4> Ops;
3210 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3211 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3212 }
3213 EntryBuilder->buildBuildVector(Reg, Ops);
3214 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3215 EntryBuilder->buildBlockAddress(Reg, BA);
3216 } else
3217 return false;
3219 return true;
3220 }
3222 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3223 MachineBasicBlock &MBB) {
3224 for (auto &BTB : SL->BitTestCases) {
3225 // Emit header first, if it wasn't already emitted.
3226 if (!BTB.Emitted)
3227 emitBitTestHeader(BTB, BTB.Parent);
3229 BranchProbability UnhandledProb = BTB.Prob;
3230 for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3231 UnhandledProb -= BTB.Cases[j].ExtraProb;
3232 // Set the current basic block to the mbb we wish to insert the code into
3233 MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3234 // If all cases cover a contiguous range, it is not necessary to jump to
3235 // the default block after the last bit test fails. This is because the
3236 // range check during bit test header creation has guaranteed that every
3237 // case here doesn't go outside the range. In this case, there is no need
3238 // to perform the last bit test, as it will always be true. Instead, make
3239 // the second-to-last bit-test fall through to the target of the last bit
3240 // test, and delete the last bit test.
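// For instance, if the cases cover the contiguous values {0, 1, 2} and every
// test but the last has failed, the header's range check already guarantees
// the value matches the last case, so the final test is redundant.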
3242 MachineBasicBlock *NextMBB;
3243 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3244 // Second-to-last bit-test with contiguous range: fall through to the
3245 // target of the final bit test.
3246 NextMBB = BTB.Cases[j + 1].TargetBB;
3247 } else if (j + 1 == ej) {
3248 // For the last bit test, fall through to Default.
3249 NextMBB = BTB.Default;
3250 } else {
3251 // Otherwise, fall through to the next bit test.
3252 NextMBB = BTB.Cases[j + 1].ThisBB;
3253 }
3255 emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3257 if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3258 // We need to record the replacement phi edge here that normally
3259 // happens in emitBitTestCase before we delete the case, otherwise the
3260 // phi edge will be lost.
3261 addMachineCFGPred({BTB.Parent->getBasicBlock(),
3262 BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3263 MBB);
3264 // Since we're not going to use the final bit test, remove it.
3265 BTB.Cases.pop_back();
3266 break;
3267 }
3268 }
3269 // This is the "default" BB. There are two jumps to it: one from the "header"
3270 // BB and, unless it was skipped, one from the last "case" BB.
3271 CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3272 BTB.Default->getBasicBlock()};
3273 addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3274 if (!BTB.ContiguousRange) {
3275 addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3276 }
3277 }
3278 SL->BitTestCases.clear();
3280 for (auto &JTCase : SL->JTCases) {
3281 // Emit header first, if it wasn't already emitted.
3282 if (!JTCase.first.Emitted)
3283 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3285 emitJumpTable(JTCase.second, JTCase.second.MBB);
3286 }
3287 SL->JTCases.clear();
3289 for (auto &SwCase : SL->SwitchCases)
3290 emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3291 SL->SwitchCases.clear();
3293 // Check if we need to generate stack-protector guard checks.
3294 StackProtector &SP = getAnalysis<StackProtector>();
3295 if (SP.shouldEmitSDCheck(BB)) {
3296 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3297 bool FunctionBasedInstrumentation =
3298 TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
3299 SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3300 }
3301 // Handle stack protector.
3302 if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3303 LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3304 return false;
3305 } else if (SPDescriptor.shouldEmitStackProtector()) {
3306 MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3307 MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3309 // Find the split point to split the parent mbb. At the same time copy all
3310 // physical registers used in the tail of parent mbb into virtual registers
3311 // before the split point and back into physical registers after the split
3312 // point. This prevents us needing to deal with Live-ins and many other
3313 // register allocation issues caused by us splitting the parent mbb. The
3314 // register allocator will clean up said virtual copies later on.
3315 MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3316 ParentMBB, *MF->getSubtarget().getInstrInfo());
3318 // Splice the terminator of ParentMBB into SuccessMBB.
3319 SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3320 ParentMBB->end());
3322 // Add the compare, the jump-on-not-equal, and the unconditional jump to the parent BB.
3323 if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3324 return false;
3326 // CodeGen Failure MBB if we have not codegened it yet.
3327 MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3328 if (FailureMBB->empty()) {
3329 if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3330 return false;
3331 }
3333 // Clear the Per-BB State.
3334 SPDescriptor.resetPerBBState();
3335 }
3336 return true;
3337 }
3339 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3340 MachineBasicBlock *ParentBB) {
3341 CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3342 // First create the loads to the guard/stack slot for the comparison.
3343 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3344 Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
3345 const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3346 LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
3348 MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3349 int FI = MFI.getStackProtectorIndex();
3351 Register Guard;
3352 Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3353 const Module &M = *ParentBB->getParent()->getFunction().getParent();
3354 Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
3356 // Generate code to load the content of the guard slot.
3357 Register GuardVal =
3358 CurBuilder
3359 ->buildLoad(PtrMemTy, StackSlotPtr,
3360 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3361 MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3362 .getReg(0);
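// GuardVal now holds the canary loaded from this function's stack protector
// slot; it is compared against the reference guard value further down.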
3364 if (TLI.useStackGuardXorFP()) {
3365 LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3366 return false;
3367 }
3369 // Retrieve guard check function, nullptr if instrumentation is inlined.
3370 if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3371 // This path is currently untestable on GlobalISel, since the only platform
3372 // that needs this seems to be Windows, and we fall back on that currently.
3373 // The code still lives here in case that changes.
3374 // Silence warning about unused variable until the code below that uses
3375 // 'GuardCheckFn' is enabled.
3376 (void)GuardCheckFn;
3377 return false;
3378 #if 0
3379 // The target provides a guard check function to validate the guard value.
3380 // Generate a call to that function with the content of the guard slot as
3381 // argument.
3382 FunctionType *FnTy = GuardCheckFn->getFunctionType();
3383 assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3384 ISD::ArgFlagsTy Flags;
3385 if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3386 Flags.setInReg();
3387 CallLowering::ArgInfo GuardArgInfo(
3388 {GuardVal, FnTy->getParamType(0), {Flags}});
3390 CallLowering::CallLoweringInfo Info;
3391 Info.OrigArgs.push_back(GuardArgInfo);
3392 Info.CallConv = GuardCheckFn->getCallingConv();
3393 Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3394 Info.OrigRet = {Register(), FnTy->getReturnType()};
3395 if (!CLI->lowerCall(MIRBuilder, Info)) {
3396 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3397 return false;
3398 }
3399 return true;
3400 #endif
3401 }
3403 // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3404 // Otherwise, emit a volatile load to retrieve the stack guard value.
3405 if (TLI.useLoadStackGuardNode()) {
3406 Guard =
3407 MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3408 getStackGuard(Guard, *CurBuilder);
3409 } else {
3410 // TODO: test using android subtarget when we support @llvm.thread.pointer.
3411 const Value *IRGuard = TLI.getSDagStackGuard(M);
3412 Register GuardPtr = getOrCreateVReg(*IRGuard);
3414 Guard = CurBuilder
3415 ->buildLoad(PtrMemTy, GuardPtr,
3416 MachinePointerInfo::getFixedStack(*MF, FI), Align,
3417 MachineMemOperand::MOLoad |
3418 MachineMemOperand::MOVolatile)
3419 .getReg(0);
3420 }
3422 // Perform the comparison.
3423 auto Cmp =
3424 CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3426 // If the guard and the stack slot value are not equal, branch to the failure MBB.
3426 CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3427 // Otherwise branch to success MBB.
3428 CurBuilder->buildBr(*SPD.getSuccessMBB());
3429 return true;
3430 }
3432 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3433 MachineBasicBlock *FailureBB) {
3434 CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3435 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3437 const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3438 const char *Name = TLI.getLibcallName(Libcall);
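// The failure path calls the stack-protector-fail libcall (typically
// __stack_chk_fail), which takes no arguments and does not return.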
3440 CallLowering::CallLoweringInfo Info;
3441 Info.CallConv = TLI.getLibcallCallingConv(Libcall);
3442 Info.Callee = MachineOperand::CreateES(Name);
3443 Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3444 0};
3445 if (!CLI->lowerCall(*CurBuilder, Info)) {
3446 LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3447 return false;
3448 }
3450 // On PS4/PS5, the "return address" must still be within the calling
3451 // function, even if it's at the very end, so emit an explicit TRAP here.
3452 // WebAssembly needs an unreachable instruction after a non-returning call,
3453 // because the function return type can be different from __stack_chk_fail's
3454 // return type (void).
3455 const TargetMachine &TM = MF->getTarget();
3456 if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
3457 LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3458 return false;
3459 }
3460 return true;
3461 }
3463 void IRTranslator::finalizeFunction() {
3464 // Release the memory used by the different maps we
3465 // needed during the translation.
3466 PendingPHIs.clear();
3467 VMap.reset();
3468 FrameIndices.clear();
3469 MachinePreds.clear();
3470 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3471 // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
3472 // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3473 EntryBuilder.reset();
3474 CurBuilder.reset();
3475 FuncInfo.clear();
3476 SPDescriptor.resetPerFunctionState();
3477 }
3479 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3480 /// variadic musttail call.
3481 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3482 if (!IsVarArg)
3483 return false;
3485 // Walk the block backwards, because tail calls usually only appear at the end
3486 // of a block.
3487 return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3488 const auto *CI = dyn_cast<CallInst>(&I);
3489 return CI && CI->isMustTailCall();
3490 });
3491 }
3493 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3494 MF = &CurMF;
3495 const Function &F = MF->getFunction();
3496 GISelCSEAnalysisWrapper &Wrapper =
3497 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3498 // Set the CSEConfig and run the analysis.
3499 GISelCSEInfo *CSEInfo = nullptr;
3500 TPC = &getAnalysis<TargetPassConfig>();
3501 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3502 ? EnableCSEInIRTranslator
3503 : TPC->isGISelCSEEnabled();
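// When CSE is enabled, both builders share one GISelCSEInfo so that identical
// instructions (e.g. constants) emitted during translation are deduplicated.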
3505 if (EnableCSE) {
3506 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3507 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3508 EntryBuilder->setCSEInfo(CSEInfo);
3509 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3510 CurBuilder->setCSEInfo(CSEInfo);
3511 } else {
3512 EntryBuilder = std::make_unique<MachineIRBuilder>();
3513 CurBuilder = std::make_unique<MachineIRBuilder>();
3514 }
3515 CLI = MF->getSubtarget().getCallLowering();
3516 CurBuilder->setMF(*MF);
3517 EntryBuilder->setMF(*MF);
3518 MRI = &MF->getRegInfo();
3519 DL = &F.getParent()->getDataLayout();
3520 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3521 const TargetMachine &TM = MF->getTarget();
3522 TM.resetTargetOptions(F);
3523 EnableOpts = OptLevel != CodeGenOptLevel::None && !skipFunction(F);
3524 FuncInfo.MF = MF;
3525 if (EnableOpts) {
3526 AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3527 FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3528 } else {
3529 AA = nullptr;
3530 FuncInfo.BPI = nullptr;
3531 }
3533 AC = &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(
3534 MF->getFunction());
3535 LibInfo = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
3536 FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3538 const auto &TLI = *MF->getSubtarget().getTargetLowering();
3540 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3541 SL->init(TLI, TM, *DL);
3545 assert(PendingPHIs.empty() && "stale PHIs");
3547 // Targets which want to use big endian can enable it using
3548 // enableBigEndian()
3549 if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3550 // Currently we don't properly handle big endian code.
3551 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3552 F.getSubprogram(), &F.getEntryBlock());
3553 R << "unable to translate in big endian mode";
3554 reportTranslationError(*MF, *TPC, *ORE, R);
3555 }
3557 // Release the per-function state when we return, whether we succeeded or not.
3558 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3560 // Setup a separate basic-block for the arguments and constants
3561 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3562 MF->push_back(EntryBB);
3563 EntryBuilder->setMBB(*EntryBB);
3565 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3566 SwiftError.setFunction(CurMF);
3567 SwiftError.createEntriesInEntryBlock(DbgLoc);
3569 bool IsVarArg = F.isVarArg();
3570 bool HasMustTailInVarArgFn = false;
3572 // Create all blocks, in IR order, to preserve the layout.
3573 for (const BasicBlock &BB: F) {
3574 auto *&MBB = BBToMBB[&BB];
3576 MBB = MF->CreateMachineBasicBlock(&BB);
3577 MF->push_back(MBB);
3579 if (BB.hasAddressTaken())
3580 MBB->setAddressTakenIRBlock(const_cast<BasicBlock *>(&BB));
3582 if (!HasMustTailInVarArgFn)
3583 HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3584 }
3586 MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3589 // Make our arguments/constants entry block fall through to the IR entry block.
3589 EntryBB->addSuccessor(&getMBB(F.front()));
3591 if (CLI->fallBackToDAGISel(*MF)) {
3592 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3593 F.getSubprogram(), &F.getEntryBlock());
3594 R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3595 reportTranslationError(*MF, *TPC, *ORE, R);
3596 return false;
3597 }
3599 // Lower the actual args into this basic block.
3600 SmallVector<ArrayRef<Register>, 8> VRegArgs;
3601 for (const Argument &Arg: F.args()) {
3602 if (DL->getTypeStoreSize(Arg.getType()).isZero())
3603 continue; // Don't handle zero sized types.
3604 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3605 VRegArgs.push_back(VRegs);
3607 if (Arg.hasSwiftErrorAttr()) {
3608 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3609 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3610 }
3611 }
3613 if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
3614 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3615 F.getSubprogram(), &F.getEntryBlock());
3616 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3617 reportTranslationError(*MF, *TPC, *ORE, R);
3618 return false;
3619 }
3621 // Need to visit defs before uses when translating instructions.
3622 GISelObserverWrapper WrapperObserver;
3623 if (EnableCSE && CSEInfo)
3624 WrapperObserver.addObserver(CSEInfo);
3626 ReversePostOrderTraversal<const Function *> RPOT(&F);
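// A reverse post-order walk visits a definition's block before any block that
// uses it; PHI incoming values are the exception and are completed later in
// finishPendingPhis().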
3627 #ifndef NDEBUG
3628 DILocationVerifier Verifier;
3629 WrapperObserver.addObserver(&Verifier);
3630 #endif // ifndef NDEBUG
3631 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3632 RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3633 for (const BasicBlock *BB : RPOT) {
3634 MachineBasicBlock &MBB = getMBB(*BB);
3635 // Set the insertion point of all the following translations to
3636 // the end of this basic block.
3637 CurBuilder->setMBB(MBB);
3638 HasTailCall = false;
3639 for (const Instruction &Inst : *BB) {
3640 // If we translated a tail call in the last step, then we know
3641 // everything after the call is either a return, or something that is
3642 // handled by the call itself. (E.g. a lifetime marker or assume
3643 // intrinsic.) In this case, we should stop translating the block and
3644 // move on.
3645 if (HasTailCall)
3646 break;
3647 #ifndef NDEBUG
3648 Verifier.setCurrentInst(&Inst);
3649 #endif // ifndef NDEBUG
3650 if (translate(Inst))
3651 continue;
3653 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3654 Inst.getDebugLoc(), BB);
3655 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3657 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3658 std::string InstStrStorage;
3659 raw_string_ostream InstStr(InstStrStorage);
3660 InstStr << Inst;
3662 R << ": '" << InstStr.str() << "'";
3663 }
3665 reportTranslationError(*MF, *TPC, *ORE, R);
3666 return false;
3667 }
3669 if (!finalizeBasicBlock(*BB, MBB)) {
3670 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3671 BB->getTerminator()->getDebugLoc(), BB);
3672 R << "unable to translate basic block";
3673 reportTranslationError(*MF, *TPC, *ORE, R);
3674 return false;
3675 }
3677 #ifndef NDEBUG
3678 WrapperObserver.removeObserver(&Verifier);
3679 #endif
3680 }
3682 finishPendingPhis();
3684 SwiftError.propagateVRegs();
3686 // Merge the argument lowering and constants block with its single
3687 // successor, the LLVM-IR entry block. We want the basic block to
3688 // be maximal.
3689 assert(EntryBB->succ_size() == 1 &&
3690 "Custom BB used for lowering should have only one successor");
3691 // Get the successor of the current entry block.
3692 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3693 assert(NewEntryBB.pred_size() == 1 &&
3694 "LLVM-IR entry block has a predecessor!?");
3695 // Move all the instructions from the current entry block to the
3696 // new entry block.
3697 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3698 EntryBB->end());
3700 // Update the live-in information for the new entry block.
3701 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3702 NewEntryBB.addLiveIn(LiveIn);
3703 NewEntryBB.sortUniqueLiveIns();
3705 // Get rid of the now empty basic block.
3706 EntryBB->removeSuccessor(&NewEntryBB);
3707 MF->remove(EntryBB);
3708 MF->deleteMachineBasicBlock(EntryBB);
3710 assert(&MF->front() == &NewEntryBB &&
3711 "New entry wasn't next in the list of basic blocks!");
3713 // Initialize stack protector information.
3714 StackProtector &SP = getAnalysis<StackProtector>();
3715 SP.copyToMachineFrameInfo(MF->getFrameInfo());
3717 return false;
3718 }