//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));
char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

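// Illustrative note (not part of the original source): getOrCreateVRegs()
// above splits a first-class aggregate into one vreg per leaf member.
// Assuming the usual x86-64 DataLayout, a value of type {i32, [2 x i64]}
// would yield three vregs of LLTs s32, s64 and s64, with bit offsets 0, 64
// and 128 recorded in VMap's offset list alongside them.
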
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

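// Illustrative sketch (not part of the original source): through
// translateBinaryOp() above, an IR operator such as "%r = add nsw i32 %a, %b"
// becomes, roughly,
//   %r:_(s32) = nsw G_ADD %a:_(s32), %b:_(s32)
// with the IR wrap/fast-math flags copied onto the generic instruction.
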
bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    Register Op1 = getOrCreateVReg(*U.getOperand(1));
    Register Res = getOrCreateVReg(U);
    uint16_t Flags = 0;
    if (isa<Instruction>(U)) {
      const Instruction &I = cast<Instruction>(U);
      Flags = MachineInstr::copyFlagsFromInstruction(I);
    }
    // Negate the last operand of the FSUB
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  auto *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
  else {
    assert(CI && "Instruction should be CmpInst");
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
                          MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up the insertion point, but that does not matter:
  // a return is the last instruction of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

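// Illustrative sketch (not part of the original source): in translateBr()
// below, a conditional branch
//   br i1 %cond, label %then, label %else
// becomes, roughly,
//   G_BRCOND %cond(s1), %bb.then
//   G_BR %bb.else   ; omitted when %bb.else is the layout successor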
bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    Register Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N is
    // the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

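// Illustrative sketch (not part of the original source): for a switch over
// cases 5..8 on an s32 value with 64-bit pointers, emitJumpTableHeader()
// below would emit roughly
//   %sub:_(s32) = G_SUB %val, 5
//   %ext:_(s64) = G_ZEXT %sub              ; widened/truncated to pointer width
//   %cmp:_(s1)  = G_ICMP intpred(ugt), %ext:_(s64), 3
//   G_BRCOND %cmp:_(s1), %bb.default
// ahead of the G_BRJT emitted by emitJumpTable() above.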
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // may therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
    Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT &CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
    std::swap(CB.TrueBB, CB.FalseBB);
    auto True = MIB.buildConstant(i1Ty, 1);
    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
               .getReg(0);
  }

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
      return false; // Bit tests currently unimplemented.
    }
    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

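// Illustrative sketch (not part of the original source): translateLoad()
// below emits one G_LOAD per sub-value of an aggregate. Assuming the usual
// x86-64 DataLayout, "load {i32, i64}, {i32, i64}* %p" becomes, roughly,
//   %v0:_(s32) = G_LOAD %p(p0)
//   %off:_(s64) = G_CONSTANT i64 8
//   %p8:_(p0) = G_GEP %p, %off
//   %v1:_(s64) = G_LOAD %p8(p0)
// where the byte offsets come from the per-sub-value offsets in VMap.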
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

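// Illustrative note (not part of the original source): for
//   %x = extractvalue {i32, i64} %agg, 1
// and a typical 64-bit DataLayout, getOffsetFromIndices() above returns 64
// (the field sits at byte offset 8, and the result is in bits), which
// translateExtractValue() below matches against the recorded sub-value
// offsets to select the right source vregs.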
bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

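// Illustrative note (not part of the original source): when a bitcast does
// not change the LLT (e.g. <2 x i32> to i64 does, but i8* to i32* does not on
// a typical target), translateBitCast() below simply reuses the source's
// vreg instead of emitting a G_BITCAST, falling back to a COPY only if a
// vreg for the cast result was already handed out.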
bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

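// Illustrative sketch (not part of the original source): translateGetElementPtr()
// below folds constant indices into a running byte offset and emits
// G_MUL/G_GEP for variable ones. Assuming 8-byte elements,
// "getelementptr i64, i64* %p, i64 %i" becomes, roughly,
//   %size:_(s64) = G_CONSTANT i64 8
//   %off:_(s64)  = G_MUL %size, %i
//   %res:_(p0)   = G_GEP %p, %off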
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg =
            MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy)
        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

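// Illustrative note (not part of the original source): translateMemFunc()
// below lowers llvm.memcpy/memmove/memset to a G_INTRINSIC_W_SIDE_EFFECTS
// carrying pointer/length vregs and a tail-call immediate, plus
// MachineMemOperands recording the destination (and, except for memset,
// source) alignment and volatility.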
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    Intrinsic::ID ID) {

  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(CI.getArgOperand(1)))
    return true;

  ArrayRef<Register> Res;
  auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
    ICall.addUse(getOrCreateVReg(**AI));

  unsigned DstAlign = 0, SrcAlign = 0;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
          ->getZExtValue();

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
    SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
    SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
  }

  // We need to propagate the tail call flag from the IR inst as an argument.
  // Otherwise, we have to pessimize and assume later that we cannot tail call
  // any memory intrinsics.
  ICall.addImm(CI.isTailCall() ? 1 : 0);

  // Create mem operands to store the alignment and volatile info.
  auto VolFlag = IsVol ? MachineMemOperand::MOVolatile : MachineMemOperand::MONone;
  ICall.addMemOperand(MF->getMachineMemOperand(
      MachinePointerInfo(CI.getArgOperand(0)),
      MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
  if (ID != Intrinsic::memset)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(CI.getArgOperand(1)),
        MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0).value());
  MIB.setMemRefs({MemRef});
}

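// Illustrative sketch (not part of the original source): through
// translateOverflowIntrinsic() below, an overflow intrinsic such as
//   %s = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
// maps onto a single two-result generic instruction, roughly
//   %val:_(s32), %ovf:_(s1) = G_SADDO %a, %b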
bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

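// Illustrative note (not part of the original source): "simple" intrinsics
// map 1:1 onto a generic opcode via translateSimpleIntrinsic() below. For
// example "%r = call float @llvm.sqrt.f32(float %x)" becomes, roughly,
// "%r:_(s32) = G_FSQRT %x" with fast-math flags copied over.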
bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildDirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0. The
        // direct/indirect thing shouldn't really be handled by something as
        // implicit as reg+noreg vs reg+imm in the first place, but it seems
        // pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
                                        MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::assume:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  }
  return false;
}

bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
                                     MachineIRBuilder &MIRBuilder) {
  const Instruction &I = *CS.getInstruction();
  ArrayRef<Register> Res = getOrCreateVRegs(I);

  SmallVector<ArrayRef<Register>, 8> Args;
  Register SwiftInVReg = 0;
  Register SwiftErrorVReg = 0;
  for (auto &Arg : CS.args()) {
    if (CLI->supportSwiftError() && isSwiftError(Arg)) {
      assert(SwiftInVReg == 0 && "Expected only one swift error argument");
      LLT Ty = getLLTForType(*Arg->getType(), *DL);
      SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
      MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
                                            &I, &MIRBuilder.getMBB(), Arg));
      Args.emplace_back(makeArrayRef(SwiftInVReg));
      SwiftErrorVReg =
          SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
      continue;
    }
    Args.push_back(getOrCreateVRegs(*Arg));
  }

  // We don't set HasCalls on MFI here yet because call lowering may decide to
  // optimize into tail calls. Instead, we defer that to selection where a final
  // scan is done to check if any instructions are calls.
  bool Success =
      CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
                     [&]() { return getOrCreateVReg(*CS.getCalledValue()); });

  // Check if we just inserted a tail call.
  if (Success) {
    assert(!HasTailCall && "Can't tail call return twice from block?");
    const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
    HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
  }

  return Success;
}

bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
  const CallInst &CI = cast<CallInst>(U);
  auto TII = MF->getTarget().getIntrinsicInfo();
  const Function *F = CI.getCalledFunction();

  // FIXME: support Windows dllimport function calls.
  if (F && F->hasDLLImportStorageClass())
    return false;

  if (CI.isInlineAsm())
    return translateInlineAsm(CI, MIRBuilder);

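  // Resolve the intrinsic ID; if the generic lookup finds nothing, try the
  // target-specific intrinsics via TargetIntrinsicInfo.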
  Intrinsic::ID ID = Intrinsic::not_intrinsic;
  if (F && F->isIntrinsic()) {
    ID = F->getIntrinsicID();
    if (TII && ID == Intrinsic::not_intrinsic)
      ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
  }

  if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
    return translateCallSite(&CI, MIRBuilder);

  assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");

  if (translateKnownIntrinsic(CI, ID, MIRBuilder))
    return true;

  ArrayRef<Register> ResultRegs;
  if (!CI.getType()->isVoidTy())
    ResultRegs = getOrCreateVRegs(CI);

  // Ignore the callsite attributes. Backend code is most likely not expecting
  // an intrinsic to sometimes have side effects and sometimes not.
  MachineInstrBuilder MIB =
      MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
  if (isa<FPMathOperator>(CI))
    MIB->copyIRFlags(CI);

  for (auto &Arg : enumerate(CI.arg_operands())) {
    // Some intrinsics take metadata parameters. Reject them.
    if (isa<MetadataAsValue>(Arg.value()))
      return false;

    // If this is required to be an immediate, don't materialize it in a
    // register.
    if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
        // imm arguments are more convenient than cimm (and realistically
        // probably sufficient), so use them.
        assert(CI->getBitWidth() <= 64 &&
               "large intrinsic immediates not handled");
        MIB.addImm(CI->getSExtValue());
      } else {
        MIB.addFPImm(cast<ConstantFP>(Arg.value()));
      }
    } else {
      ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
      if (VRegs.size() > 1)
        return false;
      MIB.addUse(VRegs[0]);
    }
  }

  // Add a MachineMemOperand if it is a target mem intrinsic.
  const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
  TargetLowering::IntrinsicInfo Info;
  // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
  if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
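    // Prefer the alignment the target reported; if it didn't provide one,
    // fall back to the ABI type alignment of the accessed memory type.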
    MaybeAlign Align = Info.align;
    if (!Align)
      Align = MaybeAlign(
          DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));

    uint64_t Size = Info.memVT.getStoreSize();
    MIB.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
  }

  return true;
}
bool IRTranslator::translateInvoke(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  const InvokeInst &I = cast<InvokeInst>(U);
  MCContext &Context = MF->getContext();

  const BasicBlock *ReturnBB = I.getSuccessor(0);
  const BasicBlock *EHPadBB = I.getSuccessor(1);

  const Value *Callee = I.getCalledValue();
  const Function *Fn = dyn_cast<Function>(Callee);
  if (isa<InlineAsm>(Callee))
    return false;

  // FIXME: support invoking patchpoint and statepoint intrinsics.
  if (Fn && Fn->isIntrinsic())
    return false;

  // FIXME: support deoptimization operand bundles.
  if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
    return false;

  // FIXME: support Windows exception handling.
  if (!isa<LandingPadInst>(EHPadBB->front()))
    return false;

  // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
  // the region covered by the try.
  MCSymbol *BeginSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);

  if (!translateCallSite(&I, MIRBuilder))
    return false;

  MCSymbol *EndSymbol = Context.createTempSymbol();
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);

  // FIXME: track probabilities.
  MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
                    &ReturnMBB = getMBB(*ReturnBB);
  MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
  MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
  MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
  MIRBuilder.buildBr(ReturnMBB);

  return true;
}
bool IRTranslator::translateCallBr(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  // FIXME: Implement this.
  return false;
}
bool IRTranslator::translateLandingPad(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const LandingPadInst &LP = cast<LandingPadInst>(U);

  MachineBasicBlock &MBB = MIRBuilder.getMBB();

  MBB.setIsEHPad();

  // If there aren't registers to copy the values into (e.g., during SjLj
  // exceptions), then don't bother.
  auto &TLI = *MF->getSubtarget().getTargetLowering();
  const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
  if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
      TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
    return true;

  // If the landingpad's return type is token type, we don't create virtual
  // registers for its exception pointer and selector values. Extracting the
  // exception pointer or selector from a token-typed landingpad is not
  // currently supported.
  if (LP.getType()->isTokenTy())
    return true;

  // Add a label to mark the beginning of the landing pad. Deletion of the
  // landing pad can thus be detected via the MachineModuleInfo.
  MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
    .addSym(MF->addLandingPad(&MBB));

  LLT Ty = getLLTForType(*LP.getType(), *DL);
  Register Undef = MRI->createGenericVirtualRegister(Ty);
  MIRBuilder.buildUndef(Undef);

  SmallVector<LLT, 2> Tys;
  for (Type *Ty : cast<StructType>(LP.getType())->elements())
    Tys.push_back(getLLTForType(*Ty, *DL));
  assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
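  // The usual landingpad result type is { i8*, i32 }: the exception pointer
  // and an integer type-id selector.
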
  // Mark exception register as live in.
  Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
  if (!ExceptionReg)
    return false;

  MBB.addLiveIn(ExceptionReg);
  ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
  MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);

  Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
  if (!SelectorReg)
    return false;

  MBB.addLiveIn(SelectorReg);
  Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
  MIRBuilder.buildCopy(PtrVReg, SelectorReg);
  MIRBuilder.buildCast(ResRegs[1], PtrVReg);

  return true;
}
bool IRTranslator::translateAlloca(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  auto &AI = cast<AllocaInst>(U);

  if (AI.isSwiftError())
    return true;

  if (AI.isStaticAlloca()) {
    Register Res = getOrCreateVReg(AI);
    int FI = getOrCreateFrameIndex(AI);
    MIRBuilder.buildFrameIndex(Res, FI);
    return true;
  }

  // FIXME: support stack probing for Windows.
  if (MF->getTarget().getTargetTriple().isOSWindows())
    return false;

  // Now we're in the harder dynamic case.
  Type *Ty = AI.getAllocatedType();
  unsigned Align =
      std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());

  Register NumElts = getOrCreateVReg(*AI.getArraySize());

  Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
  LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
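  // The array-size operand can be of any integer width, so normalize it to
  // the pointer-width integer type before computing the allocation size.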
  if (MRI->getType(NumElts) != IntPtrTy) {
    Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
    MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
    NumElts = ExtElts;
  }

  Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
  Register TySize =
      getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
  MIRBuilder.buildMul(AllocSize, NumElts, TySize);

  unsigned StackAlign =
      MF->getSubtarget().getFrameLowering()->getStackAlignment();
  if (Align <= StackAlign)
    Align = 0;

  // Round the size of the allocation up to the stack alignment size
  // by adding SA-1 to the size. This doesn't overflow because we're computing
  // an address inside an alloca.
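  // E.g. with a 16-byte stack alignment this computes
  // (AllocSize + 15) & ~15, rounding AllocSize up to a multiple of 16.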
  auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
  auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
                                      MachineInstr::NoUWrap);
  auto AlignCst =
      MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
  auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);

  MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);

  MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
  assert(MF->getFrameInfo().hasVarSizedObjects());
  return true;
}
bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
  // FIXME: We may need more info about the type. Because of how LLT works,
  // we're completely discarding the i64/double distinction here (amongst
  // others). Fortunately the ABIs I know of where that matters don't use va_arg
  // anyway but that's not guaranteed.
  MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addImm(DL->getABITypeAlignment(U.getType()));
  return true;
}
bool IRTranslator::translateInsertElement(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
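  // E.g. "insertelement <1 x i32> undef, i32 %v, i32 0" just forwards %v to
  // the vreg(s) mapped to the result.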
  if (U.getType()->getVectorNumElements() == 1) {
    Register Elt = getOrCreateVReg(*U.getOperand(1));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }

  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  Register Elt = getOrCreateVReg(*U.getOperand(1));
  Register Idx = getOrCreateVReg(*U.getOperand(2));
  MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
  return true;
}
bool IRTranslator::translateExtractElement(const User &U,
                                           MachineIRBuilder &MIRBuilder) {
  // If it is a <1 x Ty> vector, use the scalar as it is
  // not a legal vector type in LLT.
  if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
    Register Elt = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    if (Regs.empty()) {
      Regs.push_back(Elt);
      VMap.getOffsets(U)->push_back(0);
    } else {
      MIRBuilder.buildCopy(Regs[0], Elt);
    }
    return true;
  }
  Register Res = getOrCreateVReg(U);
  Register Val = getOrCreateVReg(*U.getOperand(0));
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
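  // Normalize the index operand to the target's preferred vector-index width
  // so later stages see a consistent type: constant indices are rebuilt at
  // the new width, anything else gets sign-extended or truncated below.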
  Register Idx;
  if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
    if (CI->getBitWidth() != PreferredVecIdxWidth) {
      APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
      auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
      Idx = getOrCreateVReg(*NewIdxCI);
    }
  }
  if (!Idx)
    Idx = getOrCreateVReg(*U.getOperand(1));
  if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
    const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
    Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
  }
  MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
  return true;
}
bool IRTranslator::translateShuffleVector(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
      .addDef(getOrCreateVReg(U))
      .addUse(getOrCreateVReg(*U.getOperand(0)))
      .addUse(getOrCreateVReg(*U.getOperand(1)))
      .addShuffleMask(cast<Constant>(U.getOperand(2)));
  return true;
}

bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
  const PHINode &PI = cast<PHINode>(U);

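  // Create one G_PHI per result register now, but defer adding the incoming
  // values until finishPendingPhis(), once all blocks have been translated
  // and every incoming vreg exists.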
  SmallVector<MachineInstr *, 4> Insts;
  for (auto Reg : getOrCreateVRegs(PI)) {
    auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
    Insts.push_back(MIB.getInstr());
  }

  PendingPHIs.emplace_back(&PI, std::move(Insts));
  return true;
}
bool IRTranslator::translateAtomicCmpXchg(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);

  if (I.isWeak())
    return false;

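  // E.g. "%r = cmpxchg i32* %p, i32 %old, i32 %new seq_cst seq_cst" becomes a
  // G_ATOMIC_CMPXCHG_WITH_SUCCESS producing the loaded value plus an i1
  // success flag.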
  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();
  Type *ValType = ResType->getStructElementType(0);

  auto Res = getOrCreateVRegs(I);
  Register OldValRes = Res[0];
  Register SuccessRes = Res[1];
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Cmp = getOrCreateVReg(*I.getCompareOperand());
  Register NewVal = getOrCreateVReg(*I.getNewValOperand());

  MIRBuilder.buildAtomicCmpXchgWithSuccess(
      OldValRes, SuccessRes, Addr, Cmp, NewVal,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ValType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getSuccessOrdering(),
                                I.getFailureOrdering()));
  return true;
}
bool IRTranslator::translateAtomicRMW(const User &U,
                                      MachineIRBuilder &MIRBuilder) {
  const AtomicRMWInst &I = cast<AtomicRMWInst>(U);

  auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
                              : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;

  Type *ResType = I.getType();

  Register Res = getOrCreateVReg(I);
  Register Addr = getOrCreateVReg(*I.getPointerOperand());
  Register Val = getOrCreateVReg(*I.getValOperand());

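  // Map the IR atomicrmw operation onto the corresponding generic opcode;
  // operations without a generic equivalent fail the translation.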
  unsigned Opcode = 0;
  switch (I.getOperation()) {
  default:
    return false;
  case AtomicRMWInst::Xchg:
    Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
    break;
  case AtomicRMWInst::Add:
    Opcode = TargetOpcode::G_ATOMICRMW_ADD;
    break;
  case AtomicRMWInst::Sub:
    Opcode = TargetOpcode::G_ATOMICRMW_SUB;
    break;
  case AtomicRMWInst::And:
    Opcode = TargetOpcode::G_ATOMICRMW_AND;
    break;
  case AtomicRMWInst::Nand:
    Opcode = TargetOpcode::G_ATOMICRMW_NAND;
    break;
  case AtomicRMWInst::Or:
    Opcode = TargetOpcode::G_ATOMICRMW_OR;
    break;
  case AtomicRMWInst::Xor:
    Opcode = TargetOpcode::G_ATOMICRMW_XOR;
    break;
  case AtomicRMWInst::Max:
    Opcode = TargetOpcode::G_ATOMICRMW_MAX;
    break;
  case AtomicRMWInst::Min:
    Opcode = TargetOpcode::G_ATOMICRMW_MIN;
    break;
  case AtomicRMWInst::UMax:
    Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
    break;
  case AtomicRMWInst::UMin:
    Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
    break;
  case AtomicRMWInst::FAdd:
    Opcode = TargetOpcode::G_ATOMICRMW_FADD;
    break;
  case AtomicRMWInst::FSub:
    Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
    break;
  }

  MIRBuilder.buildAtomicRMW(
      Opcode, Res, Addr, Val,
      *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
                                Flags, DL->getTypeStoreSize(ResType),
                                getMemOpAlignment(I), AAMDNodes(), nullptr,
                                I.getSyncScopeID(), I.getOrdering()));
  return true;
}
bool IRTranslator::translateFence(const User &U,
                                  MachineIRBuilder &MIRBuilder) {
  const FenceInst &Fence = cast<FenceInst>(U);
  MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
                        Fence.getSyncScopeID());
  return true;
}
void IRTranslator::finishPendingPhis() {
#ifndef NDEBUG
  DILocationVerifier Verifier;
  GISelObserverWrapper WrapperObserver(&Verifier);
  RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
#endif // ifndef NDEBUG
  for (auto &Phi : PendingPHIs) {
    const PHINode *PI = Phi.first;
    ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
    MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
    EntryBuilder->setDebugLoc(PI->getDebugLoc());
#ifndef NDEBUG
    Verifier.setCurrentInst(PI);
#endif // ifndef NDEBUG

    SmallSet<const MachineBasicBlock *, 16> SeenPreds;
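    // A single IR predecessor edge can map to several machine basic blocks,
    // and the same machine block can turn up more than once, so track
    // SeenPreds to avoid adding duplicate PHI operands.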
    for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
      auto IRPred = PI->getIncomingBlock(i);
      ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
      for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
        if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
          continue;
        SeenPreds.insert(Pred);
        for (unsigned j = 0; j < ValRegs.size(); ++j) {
          MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
          MIB.addUse(ValRegs[j]);
          MIB.addMBB(Pred);
        }
      }
    }
  }
}
bool IRTranslator::valueIsSplit(const Value &V,
                                SmallVectorImpl<uint64_t> *Offsets) {
  SmallVector<LLT, 4> SplitTys;
  if (Offsets && !Offsets->empty())
    Offsets->clear();
  computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
  return SplitTys.size() > 1;
}
bool IRTranslator::translate(const Instruction &Inst) {
  CurBuilder->setDebugLoc(Inst.getDebugLoc());
  // We only emit constants into the entry block from here. To prevent jumpy
  // debug behaviour, set the line to 0.
  if (const DebugLoc &DL = Inst.getDebugLoc())
    EntryBuilder->setDebugLoc(
        DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
  else
    EntryBuilder->setDebugLoc(DebugLoc());

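  // Dispatch on the IR opcode: Instruction.def expands HANDLE_INST into one
  // case per opcode, each forwarding to the matching translate##OPCODE
  // helper (translateLoad, translateStore, ...).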
  switch (Inst.getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(Inst, *CurBuilder.get());
#include "llvm/IR/Instruction.def"
  default:
    return false;
  }
}
bool IRTranslator::translate(const Constant &C, Register Reg) {
  if (auto CI = dyn_cast<ConstantInt>(&C))
    EntryBuilder->buildConstant(Reg, *CI);
  else if (auto CF = dyn_cast<ConstantFP>(&C))
    EntryBuilder->buildFConstant(Reg, *CF);
  else if (isa<UndefValue>(C))
    EntryBuilder->buildUndef(Reg);
  else if (isa<ConstantPointerNull>(C)) {
    // A null pointer is materialized as an integer 0 of pointer width and
    // then cast, so the result gets the correct pointer type.
    unsigned NullSize = DL->getTypeSizeInBits(C.getType());
    auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
    auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
    Register ZeroReg = getOrCreateVReg(*ZeroVal);
    EntryBuilder->buildCast(Reg, ZeroReg);
  } else if (auto GV = dyn_cast<GlobalValue>(&C))
    EntryBuilder->buildGlobalValue(Reg, GV);
  else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
    if (!CAZ->getType()->isVectorTy())
      return false;
    // Return the scalar if it is a <1 x Ty> vector.
    if (CAZ->getNumElements() == 1)
      return translate(*CAZ->getElementValue(0u), Reg);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
      Constant &Elt = *CAZ->getElementValue(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
    // Return the scalar if it is a <1 x Ty> vector.
    if (CV->getNumElements() == 1)
      return translate(*CV->getElementAsConstant(0), Reg);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumElements(); ++i) {
      Constant &Elt = *CV->getElementAsConstant(i);
      Ops.push_back(getOrCreateVReg(Elt));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
    switch (CE->getOpcode()) {
#define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
  case Instruction::OPCODE:                                                    \
    return translate##OPCODE(*CE, *EntryBuilder.get());
#include "llvm/IR/Instruction.def"
    default:
      return false;
    }
  } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
    if (CV->getNumOperands() == 1)
      return translate(*CV->getOperand(0), Reg);
    SmallVector<Register, 4> Ops;
    for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
      Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
    }
    EntryBuilder->buildBuildVector(Reg, Ops);
  } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
    EntryBuilder->buildBlockAddress(Reg, BA);
  } else
    return false;

  return true;
}
void IRTranslator::finalizeBasicBlock() {
  for (auto &JTCase : SL->JTCases) {
    // Emit header first, if it wasn't already emitted.
    if (!JTCase.first.Emitted)
      emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);

    emitJumpTable(JTCase.second, JTCase.second.MBB);
  }
  SL->JTCases.clear();
}
void IRTranslator::finalizeFunction() {
  // Release the memory used by the different maps we
  // needed during the translation.
  PendingPHIs.clear();
  VMap.reset();
  FrameIndices.clear();
  MachinePreds.clear();
  // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
  // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
  // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
  EntryBuilder.reset();
  CurBuilder.reset();
  FuncInfo.clear();
}
/// Returns true if a BasicBlock \p BB within a variadic function contains a
/// variadic musttail call.
static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
  if (!IsVarArg)
    return false;

  // Walk the block backwards, because tail calls usually only appear at the
  // end of a block.
  return std::any_of(BB.rbegin(), BB.rend(), [](const Instruction &I) {
    const auto *CI = dyn_cast<CallInst>(&I);
    return CI && CI->isMustTailCall();
  });
}
bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
  MF = &CurMF;
  const Function &F = MF->getFunction();
  if (F.empty())
    return false;
  GISelCSEAnalysisWrapper &Wrapper =
      getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
  // Set the CSEConfig and run the analysis.
  GISelCSEInfo *CSEInfo = nullptr;
  TPC = &getAnalysis<TargetPassConfig>();
  bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
                       ? EnableCSEInIRTranslator
                       : TPC->isGISelCSEEnabled();

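  // An explicit -enable-cse-in-irtranslator on the command line overrides the
  // target's default from TargetPassConfig.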
  if (EnableCSE) {
    EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CSEInfo = &Wrapper.get(TPC->getCSEConfig());
    EntryBuilder->setCSEInfo(CSEInfo);
    CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
    CurBuilder->setCSEInfo(CSEInfo);
  } else {
    EntryBuilder = std::make_unique<MachineIRBuilder>();
    CurBuilder = std::make_unique<MachineIRBuilder>();
  }
  CLI = MF->getSubtarget().getCallLowering();
  CurBuilder->setMF(*MF);
  EntryBuilder->setMF(*MF);
  MRI = &MF->getRegInfo();
  DL = &F.getParent()->getDataLayout();
  ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
  FuncInfo.MF = MF;
  FuncInfo.BPI = nullptr;
  const auto &TLI = *MF->getSubtarget().getTargetLowering();
  const TargetMachine &TM = MF->getTarget();
  SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
  SL->init(TLI, TM, *DL);

  EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);

  assert(PendingPHIs.empty() && "stale PHIs");

  if (!DL->isLittleEndian()) {
    // Currently we don't properly handle big endian code.
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to translate in big endian mode";
    reportTranslationError(*MF, *TPC, *ORE, R);
  }
  // Release the per-function state when we return, whether we succeeded or not.
  auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });

  // Set up a separate basic-block for the arguments and constants.
  MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
  MF->push_back(EntryBB);
  EntryBuilder->setMBB(*EntryBB);

  DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
  SwiftError.setFunction(CurMF);
  SwiftError.createEntriesInEntryBlock(DbgLoc);

  bool IsVarArg = F.isVarArg();
  bool HasMustTailInVarArgFn = false;

  // Create all blocks, in IR order, to preserve the layout.
  for (const BasicBlock &BB : F) {
    auto *&MBB = BBToMBB[&BB];

    MBB = MF->CreateMachineBasicBlock(&BB);
    MF->push_back(MBB);

    if (BB.hasAddressTaken())
      MBB->setHasAddressTaken();

    if (!HasMustTailInVarArgFn)
      HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
  }
  MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);

  // Make our arguments/constants entry block fall through to the IR entry
  // block.
  EntryBB->addSuccessor(&getMBB(F.front()));

  // Lower the actual args into this basic block.
  SmallVector<ArrayRef<Register>, 8> VRegArgs;
  for (const Argument &Arg : F.args()) {
    if (DL->getTypeStoreSize(Arg.getType()) == 0)
      continue; // Don't handle zero sized types.
    ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
    VRegArgs.push_back(VRegs);

    if (Arg.hasSwiftErrorAttr()) {
      assert(VRegs.size() == 1 && "Too many vregs for Swift error");
      SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
    }
  }
  if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
    OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                               F.getSubprogram(), &F.getEntryBlock());
    R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
    reportTranslationError(*MF, *TPC, *ORE, R);
    return false;
  }
  // Need to visit defs before uses when translating instructions.
  GISelObserverWrapper WrapperObserver;
  if (EnableCSE && CSEInfo)
    WrapperObserver.addObserver(CSEInfo);
  {
    ReversePostOrderTraversal<const Function *> RPOT(&F);
#ifndef NDEBUG
    DILocationVerifier Verifier;
    WrapperObserver.addObserver(&Verifier);
#endif // ifndef NDEBUG
    RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
    for (const BasicBlock *BB : RPOT) {
      MachineBasicBlock &MBB = getMBB(*BB);
      // Set the insertion point of all the following translations to
      // the end of this basic block.
      CurBuilder->setMBB(MBB);
      HasTailCall = false;
      for (const Instruction &Inst : *BB) {
        // If we translated a tail call in the last step, then we know
        // everything after the call is either a return, or something that is
        // handled by the call itself. (E.g. a lifetime marker or assume
        // intrinsic.) In this case, we should stop translating the block and
        // move on.
        if (HasTailCall)
          break;
#ifndef NDEBUG
        Verifier.setCurrentInst(&Inst);
#endif // ifndef NDEBUG
        if (translate(Inst))
          continue;

        OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                   Inst.getDebugLoc(), BB);
        R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);

        if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
          std::string InstStrStorage;
          raw_string_ostream InstStr(InstStrStorage);
          InstStr << Inst;

          R << ": '" << InstStr.str() << "'";
        }

        reportTranslationError(*MF, *TPC, *ORE, R);
        return false;
      }

      finalizeBasicBlock();
    }
#ifndef NDEBUG
    WrapperObserver.removeObserver(&Verifier);
#endif
  }
  finishPendingPhis();

  SwiftError.propagateVRegs();

  // Merge the argument lowering and constants block with its single
  // successor, the LLVM-IR entry block. We want the basic block to
  // be maximal.
  assert(EntryBB->succ_size() == 1 &&
         "Custom BB used for lowering should have only one successor");
  // Get the successor of the current entry block.
  MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
  assert(NewEntryBB.pred_size() == 1 &&
         "LLVM-IR entry block has a predecessor!?");
  // Move all the instructions from the current entry block to the
  // new entry block.
  NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
                    EntryBB->end());

  // Update the live-in information for the new entry block.
  for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
    NewEntryBB.addLiveIn(LiveIn);
  NewEntryBB.sortUniqueLiveIns();

  // Get rid of the now empty basic block.
  EntryBB->removeSuccessor(&NewEntryBB);
  MF->remove(EntryBB);
  MF->DeleteMachineBasicBlock(EntryBB);

  assert(&MF->front() == &NewEntryBB &&
         "New entry wasn't next in the list of basic blocks!");

  // Initialize stack protector information.
  StackProtector &SP = getAnalysis<StackProtector>();
  SP.copyToMachineFrameInfo(MF->getFrameInfo());

  return false;
}