//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG

void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    Register Op1 = getOrCreateVReg(*U.getOperand(1));
    Register Res = getOrCreateVReg(U);
    uint16_t Flags = 0;
    if (isa<Instruction>(U)) {
      const Instruction &I = cast<Instruction>(U);
      Flags = MachineInstr::copyFlagsFromInstruction(I);
    }
    // Negate the last operand of the FSUB
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
                          MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may mess up with the insertion point, but
  // this is not important as a return is the last instruction
  // of the block anyway.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    Register Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

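// Switch lowering follows the SelectionDAG scheme: cases are first collected
// into clusters (adjacent cases with the same destination are merged by
// sortAndRangeify), jump-table candidates are identified by findJumpTables,
// and the resulting work items are then lowered one at a time off a worklist.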
bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

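// For a case range [Low, High], the MHS path below emits a single unsigned
// check: (Val - Low) ule (High - Low). Values below Low wrap around to large
// unsigned numbers, so one comparison covers both bounds.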
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
    Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_ULE &&
           "Can only handle ULE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT &CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
    std::swap(CB.TrueBB, CB.FalseBB);
    auto True = MIB.buildConstant(i1Ty, 1);
    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
               .getReg(0);
  }

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}

bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_ULE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if
    // possible without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
      return false; // Bit tests currently unimplemented.
    }
    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

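// Loads (and stores, below) of aggregate values are split into one memory
// operation per scalar leaf, using the per-leaf byte offsets recorded in the
// VMap when the value's vregs were created.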
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  const MDNode *Ranges =
      Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), Ranges,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

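// GEPs are lowered by walking the indices: offsets known at compile time
// (struct fields and constant indices) are folded into a running constant,
// while dynamic indices are scaled with G_MUL and applied with G_GEP as they
// are encountered.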
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        BaseReg =
            MIRBuilder.buildGEP(PtrTy, BaseReg, OffsetMIB.getReg(0)).getReg(0);
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy)
        IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        GepOffsetReg =
            MIRBuilder.buildMul(OffsetTy, ElementSizeMIB, IdxReg).getReg(0);
      } else
        GepOffsetReg = IdxReg;

      BaseReg = MIRBuilder.buildGEP(PtrTy, BaseReg, GepOffsetReg).getReg(0);
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

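// memcpy, memmove and memset are kept as generic intrinsic calls here; the
// attached MachineMemOperands preserve the alignment and volatility of the
// original accesses for later stages of the pipeline.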
bool IRTranslator::translateMemFunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    Intrinsic::ID ID) {

  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(CI.getArgOperand(1)))
    return true;

  ArrayRef<Register> Res;
  auto ICall = MIRBuilder.buildIntrinsic(ID, Res, true);
  for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI)
    ICall.addUse(getOrCreateVReg(**AI));

  unsigned DstAlign = 0, SrcAlign = 0;
  unsigned IsVol =
      cast<ConstantInt>(CI.getArgOperand(CI.getNumArgOperands() - 1))
          ->getZExtValue();

  if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
    DstAlign = std::max<unsigned>(MCI->getDestAlignment(), 1);
    SrcAlign = std::max<unsigned>(MCI->getSourceAlignment(), 1);
  } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
    DstAlign = std::max<unsigned>(MMI->getDestAlignment(), 1);
    SrcAlign = std::max<unsigned>(MMI->getSourceAlignment(), 1);
  } else {
    auto *MSI = cast<MemSetInst>(&CI);
    DstAlign = std::max<unsigned>(MSI->getDestAlignment(), 1);
  }

  // Create mem operands to store the alignment and volatile info.
  auto VolFlag = IsVol ? MachineMemOperand::MOVolatile
                       : MachineMemOperand::MONone;
  ICall.addMemOperand(MF->getMachineMemOperand(
      MachinePointerInfo(CI.getArgOperand(0)),
      MachineMemOperand::MOStore | VolFlag, 1, DstAlign));
  if (ID != Intrinsic::memset)
    ICall.addMemOperand(MF->getMachineMemOperand(
        MachinePointerInfo(CI.getArgOperand(1)),
        MachineMemOperand::MOLoad | VolFlag, 1, SrcAlign));

  return true;
}

void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0).value());
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::bitreverse:
    return TargetOpcode::G_BITREVERSE;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
                                           MachineIRBuilder &MIRBuilder) {

  // If this is a simple intrinsic (that is, we just need to add a def of
  // a vreg, and uses for each arg operand), then translate it.
  if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
    return true;

  switch (ID) {
  default:
    break;
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end: {
    // No stack colouring in O0, discard region information.
    if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
      return true;

    unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
                                                  : TargetOpcode::LIFETIME_END;

    // Get the underlying objects for the location passed on the lifetime
    // marker.
    SmallVector<const Value *, 4> Allocas;
    GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);

    // Iterate over each underlying object, creating lifetime markers for each
    // static alloca. Quit if we find a non-static alloca.
    for (const Value *V : Allocas) {
      const AllocaInst *AI = dyn_cast<AllocaInst>(V);
      if (!AI)
        continue;

      if (!AI->isStaticAlloca())
        return true;

      MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
    }
    return true;
  }
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
    assert(DI.getVariable() && "Missing variable");

    const Value *Address = DI.getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
      return true;
    }

    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    auto AI = dyn_cast<AllocaInst>(Address);
    if (AI && AI->isStaticAlloca()) {
      // Static allocas are tracked at the MF level, no need for DBG_VALUE
      // instructions (in fact, they get ignored if they *do* exist).
      MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
                             getOrCreateFrameIndex(*AI), DI.getDebugLoc());
    } else {
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
                                       DI.getVariable(), DI.getExpression());
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
    assert(DI.getLabel() && "Missing label");

    assert(DI.getLabel()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");

    MIRBuilder.buildDbgLabel(DI.getLabel());
    return true;
  }
  case Intrinsic::vaend:
    // No target I know of cares about va_end. Certainly no in-tree target
    // does. Simplest intrinsic ever!
    return true;
  case Intrinsic::vastart: {
    auto &TLI = *MF->getSubtarget().getTargetLowering();
    Value *Ptr = CI.getArgOperand(0);
    unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;

    // FIXME: Get alignment
    MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
        .addUse(getOrCreateVReg(*Ptr))
        .addMemOperand(MF->getMachineMemOperand(
            MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst &DI = cast<DbgValueInst>(CI);
    const Value *V = DI.getValue();
    assert(DI.getVariable()->isValidLocationForIntrinsic(
               MIRBuilder.getDebugLoc()) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
    } else if (const auto *CI = dyn_cast<Constant>(V)) {
      MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
    } else {
      for (Register Reg : getOrCreateVRegs(*V)) {
        // FIXME: This does not handle register-indirect values at offset 0.
        // The direct/indirect thing shouldn't really be handled by something
        // as implicit as reg+noreg vs reg+imm in the first place, but it
        // seems pretty baked in right now.
        MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(),
                                       DI.getExpression());
      }
    }
    return true;
  }
  case Intrinsic::uadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
  case Intrinsic::sadd_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
  case Intrinsic::usub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
  case Intrinsic::ssub_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
  case Intrinsic::umul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
  case Intrinsic::smul_with_overflow:
    return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
  case Intrinsic::fmuladd: {
    const TargetMachine &TM = MF->getTarget();
    const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
    Register Dst = getOrCreateVReg(CI);
    Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
    Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
    Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
    if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
        TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
      // TODO: Revisit this to see if we should move this part of the
      // lowering to the combiner.
      MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    } else {
      LLT Ty = getLLTForType(*CI.getType(), *DL);
      auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
                                        MachineInstr::copyFlagsFromInstruction(CI));
      MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
                            MachineInstr::copyFlagsFromInstruction(CI));
    }
    return true;
  }
  case Intrinsic::memcpy:
  case Intrinsic::memmove:
  case Intrinsic::memset:
    return translateMemFunc(CI, MIRBuilder, ID);
  case Intrinsic::eh_typeid_for: {
    GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
    Register Reg = getOrCreateVReg(CI);
    unsigned TypeID = MF->getTypeIDFor(GV);
    MIRBuilder.buildConstant(Reg, TypeID);
    return true;
  }
  case Intrinsic::objectsize: {
    // If we don't know by now, we're never going to know.
    const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));

    MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
    return true;
  }
  case Intrinsic::is_constant:
    // If this wasn't constant-folded away by now, then it's not a
    // constant.
    MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
    return true;
  case Intrinsic::stackguard:
    getStackGuard(getOrCreateVReg(CI), MIRBuilder);
    return true;
  case Intrinsic::stackprotector: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
    getStackGuard(GuardVal, MIRBuilder);

    AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
    int FI = getOrCreateFrameIndex(*Slot);
    MF->getFrameInfo().setStackProtectorIndex(FI);

    MIRBuilder.buildStore(
        GuardVal, getOrCreateVReg(*Slot),
        *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
                                  MachineMemOperand::MOStore |
                                      MachineMemOperand::MOVolatile,
                                  PtrTy.getSizeInBits() / 8, 8));
    return true;
  }
  case Intrinsic::stacksave: {
    // Save the stack pointer to the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(CI);
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(Reg, StackPtr);
    return true;
  }
  case Intrinsic::stackrestore: {
    // Restore the stack pointer from the location provided by the intrinsic.
    Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
    Register StackPtr = MF->getSubtarget()
                            .getTargetLowering()
                            ->getStackPointerRegisterToSaveRestore();

    // If the target doesn't specify a stack pointer, then fall back.
    if (!StackPtr)
      return false;

    MIRBuilder.buildCopy(StackPtr, Reg);
    return true;
  }
  case Intrinsic::cttz:
  case Intrinsic::ctlz: {
    ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
    bool isTrailing = ID == Intrinsic::cttz;
    unsigned Opcode = isTrailing
                          ? Cst->isZero() ? TargetOpcode::G_CTTZ
                                          : TargetOpcode::G_CTTZ_ZERO_UNDEF
                          : Cst->isZero() ? TargetOpcode::G_CTLZ
                                          : TargetOpcode::G_CTLZ_ZERO_UNDEF;
    MIRBuilder.buildInstr(Opcode)
        .addDef(getOrCreateVReg(CI))
        .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
    return true;
  }
  case Intrinsic::invariant_start: {
    LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
    Register Undef = MRI->createGenericVirtualRegister(PtrTy);
    MIRBuilder.buildUndef(Undef);
    return true;
  }
  case Intrinsic::invariant_end:
    return true;
  case Intrinsic::assume:
  case Intrinsic::var_annotation:
  case Intrinsic::sideeffect:
    // Discard annotate attributes, assumptions, and artificial side-effects.
    return true;
  }
  return false;
}

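// Inline asm support is minimal here: only asm with an empty constraint
// string is translated, and anything with real constraints makes the
// translator report failure so the usual fallback path can take over.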
bool IRTranslator::translateInlineAsm(const CallInst &CI,
                                      MachineIRBuilder &MIRBuilder) {
  const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
  if (!IA.getConstraintString().empty())
    return false;

  unsigned ExtraInfo = 0;
  if (IA.hasSideEffects())
    ExtraInfo |= InlineAsm::Extra_HasSideEffects;
  if (IA.getDialect() == InlineAsm::AD_Intel)
    ExtraInfo |= InlineAsm::Extra_AsmDialect;

  MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
      .addExternalSymbol(IA.getAsmString().c_str())
      .addImm(ExtraInfo);

  return true;
}

1544 bool IRTranslator::translateCallSite(const ImmutableCallSite &CS,
1545 MachineIRBuilder &MIRBuilder) {
1546 const Instruction &I = *CS.getInstruction();
1547 ArrayRef<Register> Res = getOrCreateVRegs(I);
1549 SmallVector<ArrayRef<Register>, 8> Args;
1550 Register SwiftInVReg = 0;
1551 Register SwiftErrorVReg = 0;
1552 for (auto &Arg : CS.args()) {
1553 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1554 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1555 LLT Ty = getLLTForType(*Arg->getType(), *DL);
1556 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1557 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1558 &I, &MIRBuilder.getMBB(), Arg));
1559 Args.emplace_back(makeArrayRef(SwiftInVReg));
1560 SwiftErrorVReg =
1561 SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
1562 continue;
1564 Args.push_back(getOrCreateVRegs(*Arg));
1567 // We don't set HasCalls on MFI here yet because call lowering may decide to
1568 // optimize into tail calls. Instead, we defer that to selection where a final
1569 // scan is done to check if any instructions are calls.
1570 bool Success =
1571 CLI->lowerCall(MIRBuilder, CS, Res, Args, SwiftErrorVReg,
1572 [&]() { return getOrCreateVReg(*CS.getCalledValue()); });
1574 // Check if we just inserted a tail call.
1575 if (Success) {
1576 assert(!HasTailCall && "Can't tail call return twice from block?");
1577 const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
1578 HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
1579 }
1581 return Success;
1582 }
1584 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1585 const CallInst &CI = cast<CallInst>(U);
1586 auto TII = MF->getTarget().getIntrinsicInfo();
1587 const Function *F = CI.getCalledFunction();
1589 // FIXME: support Windows dllimport function calls.
1590 if (F && F->hasDLLImportStorageClass())
1591 return false;
1593 if (CI.isInlineAsm())
1594 return translateInlineAsm(CI, MIRBuilder);
1596 Intrinsic::ID ID = Intrinsic::not_intrinsic;
1597 if (F && F->isIntrinsic()) {
1598 ID = F->getIntrinsicID();
1599 if (TII && ID == Intrinsic::not_intrinsic)
1600 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1601 }
1603 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
1604 return translateCallSite(&CI, MIRBuilder);
1606 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1608 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1609 return true;
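// No dedicated translation exists for this intrinsic, so emit it as a
// generic G_INTRINSIC / G_INTRINSIC_W_SIDE_EFFECTS instruction; the builder
// chooses between the two opcodes based on the HasSideEffects flag below.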
1611 ArrayRef<Register> ResultRegs;
1612 if (!CI.getType()->isVoidTy())
1613 ResultRegs = getOrCreateVRegs(CI);
1615 // Ignore the callsite attributes. Backend code is most likely not expecting
1616 // an intrinsic to sometimes have side effects and sometimes not.
1617 MachineInstrBuilder MIB =
1618 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1619 if (isa<FPMathOperator>(CI))
1620 MIB->copyIRFlags(CI);
1622 for (auto &Arg : enumerate(CI.arg_operands())) {
1623 // Some intrinsics take metadata parameters. Reject them.
1624 if (isa<MetadataAsValue>(Arg.value()))
1625 return false;
1627 // If this is required to be an immediate, don't materialize it in a
1628 // register.
1629 if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
1630 if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
1631 // imm arguments are more convenient than cimm (and realistically
1632 // probably sufficient), so use them.
1633 assert(CI->getBitWidth() <= 64 &&
1634 "large intrinsic immediates not handled");
1635 MIB.addImm(CI->getSExtValue());
1636 } else {
1637 MIB.addFPImm(cast<ConstantFP>(Arg.value()));
1638 }
1639 } else {
1640 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
1641 if (VRegs.size() > 1)
1642 return false;
1643 MIB.addUse(VRegs[0]);
1644 }
1645 }
1647 // Add a MachineMemOperand if it is a target mem intrinsic.
1648 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1649 TargetLowering::IntrinsicInfo Info;
1650 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1651 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1652 MaybeAlign Align = Info.align;
1653 if (!Align)
1654 Align = MaybeAlign(
1655 DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext())));
1657 uint64_t Size = Info.memVT.getStoreSize();
1658 MIB.addMemOperand(MF->getMachineMemOperand(
1659 MachinePointerInfo(Info.ptrVal), Info.flags, Size, Align->value()));
1660 }
1662 return true;
1663 }
1665 bool IRTranslator::translateInvoke(const User &U,
1666 MachineIRBuilder &MIRBuilder) {
1667 const InvokeInst &I = cast<InvokeInst>(U);
1668 MCContext &Context = MF->getContext();
1670 const BasicBlock *ReturnBB = I.getSuccessor(0);
1671 const BasicBlock *EHPadBB = I.getSuccessor(1);
1673 const Value *Callee = I.getCalledValue();
1674 const Function *Fn = dyn_cast<Function>(Callee);
1675 if (isa<InlineAsm>(Callee))
1676 return false;
1678 // FIXME: support invoking patchpoint and statepoint intrinsics.
1679 if (Fn && Fn->isIntrinsic())
1680 return false;
1682 // FIXME: support whatever these are.
1683 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1684 return false;
1686 // FIXME: support Windows exception handling.
1687 if (!isa<LandingPadInst>(EHPadBB->front()))
1688 return false;
1690 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1691 // the region covered by the try.
1692 MCSymbol *BeginSymbol = Context.createTempSymbol();
1693 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1695 if (!translateCallSite(&I, MIRBuilder))
1696 return false;
1698 MCSymbol *EndSymbol = Context.createTempSymbol();
1699 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1701 // FIXME: track probabilities.
1702 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1703 &ReturnMBB = getMBB(*ReturnBB);
1704 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1705 MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1706 MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1707 MIRBuilder.buildBr(ReturnMBB);
1709 return true;
1710 }
1712 bool IRTranslator::translateCallBr(const User &U,
1713 MachineIRBuilder &MIRBuilder) {
1714 // FIXME: Implement this.
1715 return false;
1716 }
1718 bool IRTranslator::translateLandingPad(const User &U,
1719 MachineIRBuilder &MIRBuilder) {
1720 const LandingPadInst &LP = cast<LandingPadInst>(U);
1722 MachineBasicBlock &MBB = MIRBuilder.getMBB();
1724 MBB.setIsEHPad();
1726 // If there aren't registers to copy the values into (e.g., during SjLj
1727 // exceptions), then don't bother.
1728 auto &TLI = *MF->getSubtarget().getTargetLowering();
1729 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1730 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1731 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1732 return true;
1734 // If the landingpad's return type is token type, we don't create
1735 // instructions for its exception pointer and selector value. Extracting the
1736 // exception pointer or selector from a token-typed landingpad is not
1737 // currently supported.
1738 if (LP.getType()->isTokenTy())
1739 return true;
1741 // Add a label to mark the beginning of the landing pad. Deletion of the
1742 // landing pad can thus be detected via the MachineModuleInfo.
1743 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1744 .addSym(MF->addLandingPad(&MBB));
1746 LLT Ty = getLLTForType(*LP.getType(), *DL);
1747 Register Undef = MRI->createGenericVirtualRegister(Ty);
1748 MIRBuilder.buildUndef(Undef);
1750 SmallVector<LLT, 2> Tys;
1751 for (Type *Ty : cast<StructType>(LP.getType())->elements())
1752 Tys.push_back(getLLTForType(*Ty, *DL));
1753 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1755 // Mark exception register as live in.
1756 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1757 if (!ExceptionReg)
1758 return false;
1760 MBB.addLiveIn(ExceptionReg);
1761 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1762 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1764 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1765 if (!SelectorReg)
1766 return false;
1768 MBB.addLiveIn(SelectorReg);
1769 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1770 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1771 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1773 return true;
1774 }
1776 bool IRTranslator::translateAlloca(const User &U,
1777 MachineIRBuilder &MIRBuilder) {
1778 auto &AI = cast<AllocaInst>(U);
1780 if (AI.isSwiftError())
1781 return true;
1783 if (AI.isStaticAlloca()) {
1784 Register Res = getOrCreateVReg(AI);
1785 int FI = getOrCreateFrameIndex(AI);
1786 MIRBuilder.buildFrameIndex(Res, FI);
1787 return true;
1788 }
1790 // FIXME: support stack probing for Windows.
1791 if (MF->getTarget().getTargetTriple().isOSWindows())
1792 return false;
1794 // Now we're in the harder dynamic case.
1795 Type *Ty = AI.getAllocatedType();
1796 unsigned Align =
1797 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1799 Register NumElts = getOrCreateVReg(*AI.getArraySize());
1801 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1802 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1803 if (MRI->getType(NumElts) != IntPtrTy) {
1804 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1805 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1806 NumElts = ExtElts;
1807 }
1809 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1810 Register TySize =
1811 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
1812 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1814 unsigned StackAlign =
1815 MF->getSubtarget().getFrameLowering()->getStackAlignment();
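// A requested alignment no greater than the stack alignment needs no
// dynamic realignment, which is encoded as an alignment of 0.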
1816 if (Align <= StackAlign)
1817 Align = 0;
1819 // Round the size of the allocation up to the stack alignment size
1820 // by adding SA-1 to the size. This doesn't overflow because we're computing
1821 // an address inside an alloca.
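// E.g. with a 16-byte stack alignment, a 40-byte allocation is rounded up
// to (40 + 15) & ~15 == 48 bytes.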
1822 auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign - 1);
1823 auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
1824 MachineInstr::NoUWrap);
1825 auto AlignCst =
1826 MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign - 1));
1827 auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
1829 MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Align);
1831 MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1832 assert(MF->getFrameInfo().hasVarSizedObjects());
1833 return true;
1834 }
1836 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1837 // FIXME: We may need more info about the type. Because of how LLT works,
1838 // we're completely discarding the i64/double distinction here (amongst
1839 // others). Fortunately the ABIs I know of where that matters don't use va_arg
1840 // anyway, but that's not guaranteed.
1841 MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1842 .addDef(getOrCreateVReg(U))
1843 .addUse(getOrCreateVReg(*U.getOperand(0)))
1844 .addImm(DL->getABITypeAlignment(U.getType()));
1845 return true;
1846 }
1848 bool IRTranslator::translateInsertElement(const User &U,
1849 MachineIRBuilder &MIRBuilder) {
1850 // If it is a <1 x Ty> vector, use the scalar as it is
1851 // not a legal vector type in LLT.
1852 if (U.getType()->getVectorNumElements() == 1) {
1853 Register Elt = getOrCreateVReg(*U.getOperand(1));
1854 auto &Regs = *VMap.getVRegs(U);
1855 if (Regs.empty()) {
1856 Regs.push_back(Elt);
1857 VMap.getOffsets(U)->push_back(0);
1858 } else {
1859 MIRBuilder.buildCopy(Regs[0], Elt);
1860 }
1861 return true;
1862 }
1864 Register Res = getOrCreateVReg(U);
1865 Register Val = getOrCreateVReg(*U.getOperand(0));
1866 Register Elt = getOrCreateVReg(*U.getOperand(1));
1867 Register Idx = getOrCreateVReg(*U.getOperand(2));
1868 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1869 return true;
1870 }
1872 bool IRTranslator::translateExtractElement(const User &U,
1873 MachineIRBuilder &MIRBuilder) {
1874 // If it is a <1 x Ty> vector, use the scalar as it is
1875 // not a legal vector type in LLT.
1876 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1877 Register Elt = getOrCreateVReg(*U.getOperand(0));
1878 auto &Regs = *VMap.getVRegs(U);
1879 if (Regs.empty()) {
1880 Regs.push_back(Elt);
1881 VMap.getOffsets(U)->push_back(0);
1882 } else {
1883 MIRBuilder.buildCopy(Regs[0], Elt);
1884 }
1885 return true;
1886 }
1887 Register Res = getOrCreateVReg(U);
1888 Register Val = getOrCreateVReg(*U.getOperand(0));
1889 const auto &TLI = *MF->getSubtarget().getTargetLowering();
1890 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
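// IR allows the index operand to have any integer width, so normalize it to
// the width the target prefers for vector indices before building
// G_EXTRACT_VECTOR_ELT.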
1891 Register Idx;
1892 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1893 if (CI->getBitWidth() != PreferredVecIdxWidth) {
1894 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1895 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1896 Idx = getOrCreateVReg(*NewIdxCI);
1897 }
1898 }
1899 if (!Idx)
1900 Idx = getOrCreateVReg(*U.getOperand(1));
1901 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1902 const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1903 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1904 }
1905 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1906 return true;
1907 }
1909 bool IRTranslator::translateShuffleVector(const User &U,
1910 MachineIRBuilder &MIRBuilder) {
1911 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1912 .addDef(getOrCreateVReg(U))
1913 .addUse(getOrCreateVReg(*U.getOperand(0)))
1914 .addUse(getOrCreateVReg(*U.getOperand(1)))
1915 .addShuffleMask(cast<Constant>(U.getOperand(2)));
1916 return true;
1917 }
1919 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1920 const PHINode &PI = cast<PHINode>(U);
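// Create one G_PHI per component vreg of the PHI's (possibly aggregate)
// type now; the incoming (value, predecessor) operands are filled in later
// by finishPendingPhis(), once every incoming value has vregs.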
1922 SmallVector<MachineInstr *, 4> Insts;
1923 for (auto Reg : getOrCreateVRegs(PI)) {
1924 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1925 Insts.push_back(MIB.getInstr());
1926 }
1928 PendingPHIs.emplace_back(&PI, std::move(Insts));
1929 return true;
1930 }
1932 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1933 MachineIRBuilder &MIRBuilder) {
1934 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
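// Weak cmpxchg is not handled here yet; returning false lets the usual
// fallback path take over.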
1936 if (I.isWeak())
1937 return false;
1939 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1940 : MachineMemOperand::MONone;
1941 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1943 Type *ResType = I.getType();
1944 Type *ValType = ResType->getStructElementType(0);
1946 auto Res = getOrCreateVRegs(I);
1947 Register OldValRes = Res[0];
1948 Register SuccessRes = Res[1];
1949 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1950 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
1951 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
1953 MIRBuilder.buildAtomicCmpXchgWithSuccess(
1954 OldValRes, SuccessRes, Addr, Cmp, NewVal,
1955 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1956 Flags, DL->getTypeStoreSize(ValType),
1957 getMemOpAlignment(I), AAMDNodes(), nullptr,
1958 I.getSyncScopeID(), I.getSuccessOrdering(),
1959 I.getFailureOrdering()));
1960 return true;
1961 }
1963 bool IRTranslator::translateAtomicRMW(const User &U,
1964 MachineIRBuilder &MIRBuilder) {
1965 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1967 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1968 : MachineMemOperand::MONone;
1969 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1971 Type *ResType = I.getType();
1973 Register Res = getOrCreateVReg(I);
1974 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1975 Register Val = getOrCreateVReg(*I.getValOperand());
1977 unsigned Opcode = 0;
1978 switch (I.getOperation()) {
1979 default:
1980 return false;
1981 case AtomicRMWInst::Xchg:
1982 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1983 break;
1984 case AtomicRMWInst::Add:
1985 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
1986 break;
1987 case AtomicRMWInst::Sub:
1988 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
1989 break;
1990 case AtomicRMWInst::And:
1991 Opcode = TargetOpcode::G_ATOMICRMW_AND;
1992 break;
1993 case AtomicRMWInst::Nand:
1994 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
1995 break;
1996 case AtomicRMWInst::Or:
1997 Opcode = TargetOpcode::G_ATOMICRMW_OR;
1998 break;
1999 case AtomicRMWInst::Xor:
2000 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2001 break;
2002 case AtomicRMWInst::Max:
2003 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2004 break;
2005 case AtomicRMWInst::Min:
2006 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2007 break;
2008 case AtomicRMWInst::UMax:
2009 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2010 break;
2011 case AtomicRMWInst::UMin:
2012 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2013 break;
2014 case AtomicRMWInst::FAdd:
2015 Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2016 break;
2017 case AtomicRMWInst::FSub:
2018 Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2019 break;
2020 }
2022 MIRBuilder.buildAtomicRMW(
2023 Opcode, Res, Addr, Val,
2024 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2025 Flags, DL->getTypeStoreSize(ResType),
2026 getMemOpAlignment(I), AAMDNodes(), nullptr,
2027 I.getSyncScopeID(), I.getOrdering()));
2028 return true;
2029 }
2031 bool IRTranslator::translateFence(const User &U,
2032 MachineIRBuilder &MIRBuilder) {
2033 const FenceInst &Fence = cast<FenceInst>(U);
2034 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2035 Fence.getSyncScopeID());
2036 return true;
2037 }
2039 void IRTranslator::finishPendingPhis() {
2040 #ifndef NDEBUG
2041 DILocationVerifier Verifier;
2042 GISelObserverWrapper WrapperObserver(&Verifier);
2043 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2044 #endif // ifndef NDEBUG
2045 for (auto &Phi : PendingPHIs) {
2046 const PHINode *PI = Phi.first;
2047 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2048 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2049 EntryBuilder->setDebugLoc(PI->getDebugLoc());
2050 #ifndef NDEBUG
2051 Verifier.setCurrentInst(PI);
2052 #endif // ifndef NDEBUG
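// A single IR predecessor can map to several machine basic blocks (e.g.
// after switch lowering), and the same machine block may come back more
// than once; SeenPreds ensures each predecessor contributes exactly one
// operand pair per G_PHI.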
2054 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2055 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2056 auto IRPred = PI->getIncomingBlock(i);
2057 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2058 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2059 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2060 continue;
2061 SeenPreds.insert(Pred);
2062 for (unsigned j = 0; j < ValRegs.size(); ++j) {
2063 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2064 MIB.addUse(ValRegs[j]);
2065 MIB.addMBB(Pred);
2066 }
2067 }
2068 }
2069 }
2070 }
2072 bool IRTranslator::valueIsSplit(const Value &V,
2073 SmallVectorImpl<uint64_t> *Offsets) {
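// A value is "split" when its type lowers to more than one LLT, e.g. a
// {i64, i64} struct becomes two s64 parts; if Offsets is provided, it
// receives the bit offset of each part within the value.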
2074 SmallVector<LLT, 4> SplitTys;
2075 if (Offsets && !Offsets->empty())
2076 Offsets->clear();
2077 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2078 return SplitTys.size() > 1;
2079 }
2081 bool IRTranslator::translate(const Instruction &Inst) {
2082 CurBuilder->setDebugLoc(Inst.getDebugLoc());
2083 // We only emit constants into the entry block from here. To prevent jumpy
2084 // debug behaviour, set the line to 0.
2085 if (const DebugLoc &DL = Inst.getDebugLoc())
2086 EntryBuilder->setDebugLoc(
2087 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
2088 else
2089 EntryBuilder->setDebugLoc(DebugLoc());
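// Dispatch on the IR opcode: Instruction.def expands to one case per
// opcode, each forwarding to the corresponding translate<Opcode> method.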
2091 switch (Inst.getOpcode()) {
2092 #define HANDLE_INST(NUM, OPCODE, CLASS) \
2093 case Instruction::OPCODE: \
2094 return translate##OPCODE(Inst, *CurBuilder.get());
2095 #include "llvm/IR/Instruction.def"
2096 default:
2097 return false;
2098 }
2099 }
2101 bool IRTranslator::translate(const Constant &C, Register Reg) {
2102 if (auto CI = dyn_cast<ConstantInt>(&C))
2103 EntryBuilder->buildConstant(Reg, *CI);
2104 else if (auto CF = dyn_cast<ConstantFP>(&C))
2105 EntryBuilder->buildFConstant(Reg, *CF);
2106 else if (isa<UndefValue>(C))
2107 EntryBuilder->buildUndef(Reg);
2108 else if (isa<ConstantPointerNull>(C)) {
2109 // As we are trying to build a constant value of 0 into a pointer,
2110 // insert a cast to make the types correct.
2111 unsigned NullSize = DL->getTypeSizeInBits(C.getType());
2112 auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
2113 auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
2114 Register ZeroReg = getOrCreateVReg(*ZeroVal);
2115 EntryBuilder->buildCast(Reg, ZeroReg);
2116 } else if (auto GV = dyn_cast<GlobalValue>(&C))
2117 EntryBuilder->buildGlobalValue(Reg, GV);
2118 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2119 if (!CAZ->getType()->isVectorTy())
2120 return false;
2121 // Return the scalar if it is a <1 x Ty> vector.
2122 if (CAZ->getNumElements() == 1)
2123 return translate(*CAZ->getElementValue(0u), Reg);
2124 SmallVector<Register, 4> Ops;
2125 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
2126 Constant &Elt = *CAZ->getElementValue(i);
2127 Ops.push_back(getOrCreateVReg(Elt));
2128 }
2129 EntryBuilder->buildBuildVector(Reg, Ops);
2130 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2131 // Return the scalar if it is a <1 x Ty> vector.
2132 if (CV->getNumElements() == 1)
2133 return translate(*CV->getElementAsConstant(0), Reg);
2134 SmallVector<Register, 4> Ops;
2135 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2136 Constant &Elt = *CV->getElementAsConstant(i);
2137 Ops.push_back(getOrCreateVReg(Elt));
2138 }
2139 EntryBuilder->buildBuildVector(Reg, Ops);
2140 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
2141 switch(CE->getOpcode()) {
2142 #define HANDLE_INST(NUM, OPCODE, CLASS) \
2143 case Instruction::OPCODE: \
2144 return translate##OPCODE(*CE, *EntryBuilder.get());
2145 #include "llvm/IR/Instruction.def"
2146 default:
2147 return false;
2148 }
2149 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2150 if (CV->getNumOperands() == 1)
2151 return translate(*CV->getOperand(0), Reg);
2152 SmallVector<Register, 4> Ops;
2153 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2154 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2155 }
2156 EntryBuilder->buildBuildVector(Reg, Ops);
2157 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2158 EntryBuilder->buildBlockAddress(Reg, BA);
2159 } else
2160 return false;
2162 return true;
2163 }
2165 void IRTranslator::finalizeBasicBlock() {
2166 for (auto &JTCase : SL->JTCases) {
2167 // Emit header first, if it wasn't already emitted.
2168 if (!JTCase.first.Emitted)
2169 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
2171 emitJumpTable(JTCase.second, JTCase.second.MBB);
2172 }
2173 SL->JTCases.clear();
2174 }
2176 void IRTranslator::finalizeFunction() {
2177 // Release the memory used by the different maps we
2178 // needed during the translation.
2179 PendingPHIs.clear();
2180 VMap.reset();
2181 FrameIndices.clear();
2182 MachinePreds.clear();
2183 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
2184 // to avoid accessing free'd memory (in runOnMachineFunction) and to avoid
2185 // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
2186 EntryBuilder.reset();
2187 CurBuilder.reset();
2188 FuncInfo.clear();
2189 }
2191 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
2192 MF = &CurMF;
2193 const Function &F = MF->getFunction();
2194 if (F.empty())
2195 return false;
2196 GISelCSEAnalysisWrapper &Wrapper =
2197 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
2198 // Set the CSEConfig and run the analysis.
2199 GISelCSEInfo *CSEInfo = nullptr;
2200 TPC = &getAnalysis<TargetPassConfig>();
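// An EnableCSEInIRTranslator option given explicitly on the command line
// overrides the target's default CSE setting.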
2201 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
2202 ? EnableCSEInIRTranslator
2203 : TPC->isGISelCSEEnabled();
2205 if (EnableCSE) {
2206 EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2207 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
2208 EntryBuilder->setCSEInfo(CSEInfo);
2209 CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
2210 CurBuilder->setCSEInfo(CSEInfo);
2211 } else {
2212 EntryBuilder = std::make_unique<MachineIRBuilder>();
2213 CurBuilder = std::make_unique<MachineIRBuilder>();
2214 }
2215 CLI = MF->getSubtarget().getCallLowering();
2216 CurBuilder->setMF(*MF);
2217 EntryBuilder->setMF(*MF);
2218 MRI = &MF->getRegInfo();
2219 DL = &F.getParent()->getDataLayout();
2220 ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
2221 FuncInfo.MF = MF;
2222 FuncInfo.BPI = nullptr;
2223 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2224 const TargetMachine &TM = MF->getTarget();
2225 SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
2226 SL->init(TLI, TM, *DL);
2228 EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
2230 assert(PendingPHIs.empty() && "stale PHIs");
2232 if (!DL->isLittleEndian()) {
2233 // Currently we don't properly handle big endian code.
2234 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2235 F.getSubprogram(), &F.getEntryBlock());
2236 R << "unable to translate in big endian mode";
2237 reportTranslationError(*MF, *TPC, *ORE, R);
2238 }
2240 // Release the per-function state when we return, whether we succeeded or not.
2241 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2243 // Set up a separate basic block for the arguments and constants.
2244 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2245 MF->push_back(EntryBB);
2246 EntryBuilder->setMBB(*EntryBB);
2248 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2249 SwiftError.setFunction(CurMF);
2250 SwiftError.createEntriesInEntryBlock(DbgLoc);
2252 // Create all blocks, in IR order, to preserve the layout.
2253 for (const BasicBlock &BB: F) {
2254 auto *&MBB = BBToMBB[&BB];
2256 MBB = MF->CreateMachineBasicBlock(&BB);
2257 MF->push_back(MBB);
2259 if (BB.hasAddressTaken())
2260 MBB->setHasAddressTaken();
2261 }
2263 // Make our arguments/constants entry block fall through to the IR entry block.
2264 EntryBB->addSuccessor(&getMBB(F.front()));
2266 // Lower the actual args into this basic block.
2267 SmallVector<ArrayRef<Register>, 8> VRegArgs;
2268 for (const Argument &Arg: F.args()) {
2269 if (DL->getTypeStoreSize(Arg.getType()) == 0)
2270 continue; // Don't handle zero-sized types.
2271 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2272 VRegArgs.push_back(VRegs);
2274 if (Arg.hasSwiftErrorAttr()) {
2275 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2276 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2277 }
2278 }
2280 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
2281 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2282 F.getSubprogram(), &F.getEntryBlock());
2283 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2284 reportTranslationError(*MF, *TPC, *ORE, R);
2285 return false;
2286 }
2288 // Need to visit defs before uses when translating instructions.
2289 GISelObserverWrapper WrapperObserver;
2290 if (EnableCSE && CSEInfo)
2291 WrapperObserver.addObserver(CSEInfo);
2293 ReversePostOrderTraversal<const Function *> RPOT(&F);
2294 #ifndef NDEBUG
2295 DILocationVerifier Verifier;
2296 WrapperObserver.addObserver(&Verifier);
2297 #endif // ifndef NDEBUG
2298 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2299 for (const BasicBlock *BB : RPOT) {
2300 MachineBasicBlock &MBB = getMBB(*BB);
2301 // Set the insertion point of all the following translations to
2302 // the end of this basic block.
2303 CurBuilder->setMBB(MBB);
2304 HasTailCall = false;
2305 for (const Instruction &Inst : *BB) {
2306 // If we translated a tail call in the last step, then we know
2307 // everything after the call is either a return, or something that is
2308 // handled by the call itself. (E.g. a lifetime marker or assume
2309 // intrinsic.) In this case, we should stop translating the block and
2310 // move on.
2311 if (HasTailCall)
2312 break;
2313 #ifndef NDEBUG
2314 Verifier.setCurrentInst(&Inst);
2315 #endif // ifndef NDEBUG
2316 if (translate(Inst))
2317 continue;
2319 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2320 Inst.getDebugLoc(), BB);
2321 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2323 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2324 std::string InstStrStorage;
2325 raw_string_ostream InstStr(InstStrStorage);
2326 InstStr << Inst;
2328 R << ": '" << InstStr.str() << "'";
2329 }
2331 reportTranslationError(*MF, *TPC, *ORE, R);
2332 return false;
2333 }
2335 finalizeBasicBlock();
2336 }
2337 #ifndef NDEBUG
2338 WrapperObserver.removeObserver(&Verifier);
2339 #endif
2342 finishPendingPhis();
2344 SwiftError.propagateVRegs();
2346 // Merge the argument lowering and constants block with its single
2347 // successor, the LLVM-IR entry block. We want the basic block to
2348 // be maximal.
2349 assert(EntryBB->succ_size() == 1 &&
2350 "Custom BB used for lowering should have only one successor");
2351 // Get the successor of the current entry block.
2352 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2353 assert(NewEntryBB.pred_size() == 1 &&
2354 "LLVM-IR entry block has a predecessor!?");
2355 // Move all the instructions from the current entry block to the
2356 // new entry block.
2357 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2358 EntryBB->end());
2360 // Update the live-in information for the new entry block.
2361 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2362 NewEntryBB.addLiveIn(LiveIn);
2363 NewEntryBB.sortUniqueLiveIns();
2365 // Get rid of the now empty basic block.
2366 EntryBB->removeSuccessor(&NewEntryBB);
2367 MF->remove(EntryBB);
2368 MF->DeleteMachineBasicBlock(EntryBB);
2370 assert(&MF->front() == &NewEntryBB &&
2371 "New entry wasn't next in the list of basic block!");
2373 // Initialize stack protector information.
2374 StackProtector &SP = getAnalysis<StackProtector>();
2375 SP.copyToMachineFrameInfo(MF->getFrameInfo());
2377 return false;
2378 }