//===- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
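//
// A minimal sketch of how a target plugs into this class (the subclass name
// below is hypothetical; the override shown, fastSelectInstruction, is the
// real target hook declared in FastISel.h):
//
//   class HypotheticalTargetFastISel final : public FastISel {
//   public:
//     bool fastSelectInstruction(const Instruction *I) override {
//       // Emit machine instructions for I, or return false to fall back
//       // to the SelectionDAG path for this one instruction.
//       return false;
//     }
//   };
//
//===----------------------------------------------------------------------===//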
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
                                     cl::init(true), cl::Hidden,
                                     cl::desc("Sink local values in FastISel"));
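
// An illustrative invocation of this flag (assuming an llc built from this
// tree and a target where -O0 uses FastISel), useful when bisecting
// sinking-related issues:
//   llc -O0 -fast-isel-sink-local-values=false test.ll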
STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");
/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}
/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static unsigned findSinkableLocalRegDef(MachineInstr &MI) {
  unsigned RegDef = 0;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return 0;
      RegDef = MO.getReg();
    } else if (TargetRegisterInfo::isVirtualRegister(MO.getReg())) {
      // This is another use of a vreg. Don't try to sink it.
      return 0;
    }
  }
  return RegDef;
}
void FastISel::flushLocalValueMap() {
  // Try to sink local values down to their first use so that we can give them
  // a better debug location. This has the side effect of shrinking local value
  // live ranges, which helps out fast regalloc.
  if (SinkLocalValues && LastLocalValue != EmitStartPt) {
    // Sink local value materialization instructions between EmitStartPt and
    // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
    // avoid inserting into the range that we're iterating over.
    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);

    InstOrderMap OrderMap;
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      ++RI;
      bool Store = true;
      if (!LocalMI.isSafeToMove(nullptr, Store))
        continue;
      unsigned DefReg = findSinkableLocalRegDef(LocalMI);
      if (DefReg == 0)
        continue;

      sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
  LastFlushPoint = FuncInfo.InsertPt;
}
static bool isRegUsedByPhiNodes(unsigned DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}
/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
  unsigned Order = 0;
  for (MachineInstr &I : *MBB) {
    if (!FirstTerminator &&
        (I.isTerminator() || (I.isEHLabel() && &I != &MBB->front()))) {
      FirstTerminator = &I;
      FirstTerminatorOrder = Order;
    }
    Orders[&I] = Order++;

    // We don't need to order instructions past the last flush point.
    if (I.getIterator() == LastFlushPoint)
      break;
  }
}
void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
                                             unsigned DefReg,
                                             InstOrderMap &OrderMap) {
  // If this register is used by a register fixup, MRI will not contain all
  // the uses until after register fixups, so don't attempt to sink or DCE
  // this instruction. Register fixups typically come from no-op cast
  // instructions, which replace the cast instruction vreg with the local
  // value vreg.
  if (FuncInfo.RegsWithFixups.count(DefReg))
    return;

  // We can DCE this instruction if there are no uses and it wasn't
  // materialized for a successor PHI node.
  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
    if (EmitStartPt == &LocalMI)
      EmitStartPt = EmitStartPt->getPrevNode();
    LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                      << LocalMI);
    OrderMap.Orders.erase(&LocalMI);
    LocalMI.eraseFromParent();
    return;
  }

  // Number the instructions if we haven't yet so we can efficiently find the
  // earliest use.
  if (OrderMap.Orders.empty())
    OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);

  // Find the first user in the BB.
  MachineInstr *FirstUser = nullptr;
  unsigned FirstOrder = std::numeric_limits<unsigned>::max();
  for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
    auto I = OrderMap.Orders.find(&UseInst);
    assert(I != OrderMap.Orders.end() &&
           "local value used by instruction outside local region");
    unsigned UseOrder = I->second;
    if (UseOrder < FirstOrder) {
      FirstOrder = UseOrder;
      FirstUser = &UseInst;
    }
  }

  // The insertion point will be the first terminator or the first user,
  // whichever came first. If there was no terminator, this must be a
  // fallthrough block and the insertion point is the end of the block.
  MachineBasicBlock::instr_iterator SinkPos;
  if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
    FirstOrder = OrderMap.FirstTerminatorOrder;
    SinkPos = OrderMap.FirstTerminator->getIterator();
  } else if (FirstUser) {
    SinkPos = FirstUser->getIterator();
  } else {
    assert(UsedByPHI && "must be users if not used by a phi");
    SinkPos = FuncInfo.MBB->instr_end();
  }

  // Collect all DBG_VALUEs before the new insertion position so that we can
  // sink them.
  SmallVector<MachineInstr *, 1> DbgValues;
  for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
    if (!DbgVal.isDebugValue())
      continue;
    unsigned UseOrder = OrderMap.Orders[&DbgVal];
    if (UseOrder < FirstOrder)
      DbgValues.push_back(&DbgVal);
  }

  // Sink LocalMI before SinkPos and assign it the same DebugLoc.
  LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
  FuncInfo.MBB->remove(&LocalMI);
  FuncInfo.MBB->insert(SinkPos, &LocalMI);
  if (SinkPos != FuncInfo.MBB->end())
    LocalMI.setDebugLoc(SinkPos->getDebugLoc());

  // Sink any debug values that we've collected.
  for (MachineInstr *DI : DbgValues) {
    FuncInfo.MBB->remove(DI);
    FuncInfo.MBB->insert(SinkPos, DI);
  }
}
bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel folded the use into another instruction and there is now more
  // than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}
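
// An illustrative example of the rule above: given IR in a single block
//   %a = add i32 %x, %y
//   %b = mul i32 %a, %z
// %a has exactly one use (%b) in the same block, so its register can be
// marked killed at %b during selection.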
unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}
unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}
/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}
unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}
void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}
std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}
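
// For example, on a target with 64-bit pointers, an i32 GEP index is
// sign-extended to 64 bits here before it is scaled and added to the base
// pointer, while an i128 index would be truncated.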
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (LastFlushPoint == I)
      LastFlushPoint = E;
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}
FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}
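
// Two illustrative cases of the folds above: "add i32 1, %x" is handled as an
// "ri" operation by commuting the constant to the right-hand side, and
// "urem i32 %x, 8" is rewritten to "and i32 %x, 7" before emission.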
bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}
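
// As an illustration of the offset coalescing above, a GEP such as
//   %p = getelementptr { i32, i32 }, { i32, i32 }* %s, i64 1, i32 1
// accumulates both constant steps into TotalOffs (8 + 4 = 12 bytes, assuming
// 4-byte i32 fields and no padding) and emits a single ADD of 12.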
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }
  return true;
}
bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}
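
// An illustrative IR-level use of the intrinsic lowered above:
//   call void (i64, i32, ...) @llvm.experimental.stackmap(i64 42, i32 8)
// records a stack map entry with ID 42 and reserves an 8-byte NOP shadow;
// any further operands would be recorded as live values.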
/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}
FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}
bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*IsDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*IsDef=*/true, /*IsImp=*/true, /*IsKill=*/false,
        /*IsDead=*/false, /*IsUndef=*/false, /*IsEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*IsDef=*/true,
                                            /*IsImpl=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*IsDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*IsDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}
/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}
bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize = DL.getTypeAllocSize(ElementTy);
      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  return true;
}
bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  MachineModuleInfo &MMI = FuncInfo.MF->getMMI();
  computeUsesVAFloatArgument(*Call, MMI);

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call and then use the value, because the value tends to be
  // spilled on the stack. So, we move the pointer to the last local value to
  // the beginning of the block, so that all the values which have already
  // been materialized appear after the call. It also makes sense to skip
  // intrinsics since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
              *Op, DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize: {
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::is_constant: {
    Constant *ResCI = ConstantInt::get(II->getType(), 0);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
1527 bool FastISel::selectBitCast(const User *I) {
1528 // If the bitcast doesn't change the type, just use the operand value.
1529 if (I->getType() == I->getOperand(0)->getType()) {
1530 unsigned Reg = getRegForValue(I->getOperand(0));
1531 if (!Reg)
1532 return false;
1533 updateValueMap(I, Reg);
1534 return true;
1537 // Bitcasts of other values become reg-reg copies or BITCAST operators.
1538 EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1539 EVT DstEVT = TLI.getValueType(DL, I->getType());
1540 if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
1541 !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
1542 // Unhandled type. Halt "fast" selection and bail.
1543 return false;
1545 MVT SrcVT = SrcEVT.getSimpleVT();
1546 MVT DstVT = DstEVT.getSimpleVT();
1547 unsigned Op0 = getRegForValue(I->getOperand(0));
1548 if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
1549 return false;
1550 bool Op0IsKill = hasTrivialKill(I->getOperand(0));
1552 // First, try to perform the bitcast by inserting a reg-reg copy.
1553 unsigned ResultReg = 0;
1554 if (SrcVT == DstVT) {
1555 const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
1556 const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
1557 // Don't attempt a cross-class copy. It will likely fail.
1558 if (SrcClass == DstClass) {
1559 ResultReg = createResultReg(DstClass);
1560 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1561 TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
1562 }
1563 }
1565 // If the reg-reg copy failed, select a BITCAST opcode.
1566 if (!ResultReg)
1567 ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);
1569 if (!ResultReg)
1570 return false;
1572 updateValueMap(I, ResultReg);
1573 return true;
1574 }
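// [Editorial sketch; not part of the original file] selectBitCast above only
// emits a plain COPY when both sides agree on the simple value type and the
// register class, and otherwise falls back to a target BITCAST pattern. A
// hypothetical predicate for the COPY-eligible case:
static inline bool bitcastIsPlainCopy(MVT SrcVT, MVT DstVT,
                                      const TargetRegisterClass *SrcRC,
                                      const TargetRegisterClass *DstRC) {
  // A cross-class copy would likely fail, so require an exact match.
  return SrcVT == DstVT && SrcRC == DstRC;
}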
1576 // Remove local value instructions between the one after
1577 // SavedLastLocalValue and the current function insert point.
1578 void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
1579 {
1580 MachineInstr *CurLastLocalValue = getLastLocalValue();
1581 if (CurLastLocalValue != SavedLastLocalValue) {
1582 // Find the first local value instruction to be deleted.
1583 // This is the instruction after SavedLastLocalValue if it is non-NULL.
1584 // Otherwise it's the first instruction in the block.
1585 MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
1586 if (SavedLastLocalValue)
1587 ++FirstDeadInst;
1588 else
1589 FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
1590 setLastLocalValue(SavedLastLocalValue);
1591 removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
1592 }
1593 }
1595 bool FastISel::selectInstruction(const Instruction *I) {
1596 MachineInstr *SavedLastLocalValue = getLastLocalValue();
1597 // Just before the terminator instruction, insert instructions to
1598 // feed PHI nodes in successor blocks.
1599 if (I->isTerminator()) {
1600 if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
1601 // PHI node handling may have generated local value instructions,
1602 // even though it failed to handle all PHI nodes.
1603 // We remove these instructions because SelectionDAGISel will generate
1604 // them again.
1605 removeDeadLocalValueCode(SavedLastLocalValue);
1606 return false;
1607 }
1608 }
1610 // FastISel does not handle any operand bundles except OB_funclet.
1611 if (ImmutableCallSite CS = ImmutableCallSite(I))
1612 for (unsigned i = 0, e = CS.getNumOperandBundles(); i != e; ++i)
1613 if (CS.getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
1614 return false;
1616 DbgLoc = I->getDebugLoc();
1618 SavedInsertPt = FuncInfo.InsertPt;
1620 if (const auto *Call = dyn_cast<CallInst>(I)) {
1621 const Function *F = Call->getCalledFunction();
1622 LibFunc Func;
1624 // As a special case, don't handle calls to builtin library functions that
1625 // may be translated directly to target instructions.
1626 if (F && !F->hasLocalLinkage() && F->hasName() &&
1627 LibInfo->getLibFunc(F->getName(), Func) &&
1628 LibInfo->hasOptimizedCodeGen(Func))
1629 return false;
1631 // Don't handle Intrinsic::trap if a trap function is specified.
1632 if (F && F->getIntrinsicID() == Intrinsic::trap &&
1633 Call->hasFnAttr("trap-func-name"))
1634 return false;
1635 }
1637 // First, try doing target-independent selection.
1638 if (!SkipTargetIndependentISel) {
1639 if (selectOperator(I, I->getOpcode())) {
1640 ++NumFastIselSuccessIndependent;
1641 DbgLoc = DebugLoc();
1642 return true;
1643 }
1644 // Remove dead code.
1645 recomputeInsertPt();
1646 if (SavedInsertPt != FuncInfo.InsertPt)
1647 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1648 SavedInsertPt = FuncInfo.InsertPt;
1649 }
1650 // Next, try calling the target to attempt to handle the instruction.
1651 if (fastSelectInstruction(I)) {
1652 ++NumFastIselSuccessTarget;
1653 DbgLoc = DebugLoc();
1654 return true;
1655 }
1656 // Remove dead code.
1657 recomputeInsertPt();
1658 if (SavedInsertPt != FuncInfo.InsertPt)
1659 removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
1661 DbgLoc = DebugLoc();
1662 // Undo phi node updates, because they will be added again by SelectionDAG.
1663 if (I->isTerminator()) {
1664 // PHI node handling may have generated local value instructions.
1665 // We remove them because SelectionDAGISel will generate them again.
1666 removeDeadLocalValueCode(SavedLastLocalValue);
1667 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
1668 }
1669 return false;
1670 }
1672 /// Emit an unconditional branch to the given block, unless it is the immediate
1673 /// (fall-through) successor, and update the CFG.
1674 void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
1675 const DebugLoc &DbgLoc) {
1676 if (FuncInfo.MBB->getBasicBlock()->size() > 1 &&
1677 FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
1678 // For more accurate line information, if this is the only instruction
1679 // in the block, then emit it; otherwise we have the unconditional
1680 // fall-through case, which needs no instructions.
1681 } else {
1682 // The unconditional branch case.
1683 TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
1684 SmallVector<MachineOperand, 0>(), DbgLoc);
1685 }
1686 if (FuncInfo.BPI) {
1687 auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
1688 FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
1689 FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
1690 } else
1691 FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
1692 }
1694 void FastISel::finishCondBranch(const BasicBlock *BranchBB,
1695 MachineBasicBlock *TrueMBB,
1696 MachineBasicBlock *FalseMBB) {
1697 // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
1698 // happen in degenerate IR, and MachineIR forbids having a block twice in
1699 // the successor/predecessor lists.
1700 if (TrueMBB != FalseMBB) {
1701 if (FuncInfo.BPI) {
1702 auto BranchProbability =
1703 FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
1704 FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
1705 } else
1706 FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
1707 }
1709 fastEmitBranch(FalseMBB, DbgLoc);
1710 }
1712 /// Emit an FNeg operation.
1713 bool FastISel::selectFNeg(const User *I) {
1714 Value *X;
1715 if (!match(I, m_FNeg(m_Value(X))))
1716 return false;
1717 unsigned OpReg = getRegForValue(X);
1718 if (!OpReg)
1719 return false;
1720 bool OpRegIsKill = hasTrivialKill(I);
1722 // If the target has ISD::FNEG, use it.
1723 EVT VT = TLI.getValueType(DL, I->getType());
1724 unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
1725 OpReg, OpRegIsKill);
1726 if (ResultReg) {
1727 updateValueMap(I, ResultReg);
1728 return true;
1729 }
1731 // Bitcast the value to integer, twiddle the sign bit with xor,
1732 // and then bitcast it back to floating-point.
1733 if (VT.getSizeInBits() > 64)
1734 return false;
1735 EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
1736 if (!TLI.isTypeLegal(IntVT))
1737 return false;
1739 unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
1740 ISD::BITCAST, OpReg, OpRegIsKill);
1741 if (!IntReg)
1742 return false;
1744 unsigned IntResultReg = fastEmit_ri_(
1745 IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
1746 UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
1747 if (!IntResultReg)
1748 return false;
1750 ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
1751 IntResultReg, /*IsKill=*/true);
1752 if (!ResultReg)
1753 return false;
1755 updateValueMap(I, ResultReg);
1756 return true;
1757 }
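// [Editorial sketch; not part of the original file] The fallback above flips
// the IEEE sign bit in the integer domain. The same trick in portable C++
// for a 64-bit double (assumes <cstdint> and <cstring>; FastISel emits
// BITCAST and XOR machine ops instead):
static inline double fnegViaSignBit(double D) {
  uint64_t Bits;
  std::memcpy(&Bits, &D, sizeof(Bits)); // bitcast f64 -> i64
  Bits ^= UINT64_C(1) << 63;            // xor the sign bit
  std::memcpy(&D, &Bits, sizeof(D));    // bitcast i64 -> f64
  return D;
}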
1759 bool FastISel::selectExtractValue(const User *U) {
1760 const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
1761 if (!EVI)
1762 return false;
1764 // Make sure we only try to handle extracts with a legal result. But also
1765 // allow i1 because it's easy.
1766 EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
1767 if (!RealVT.isSimple())
1768 return false;
1769 MVT VT = RealVT.getSimpleVT();
1770 if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
1771 return false;
1773 const Value *Op0 = EVI->getOperand(0);
1774 Type *AggTy = Op0->getType();
1776 // Get the base result register.
1777 unsigned ResultReg;
1778 DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
1779 if (I != FuncInfo.ValueMap.end())
1780 ResultReg = I->second;
1781 else if (isa<Instruction>(Op0))
1782 ResultReg = FuncInfo.InitializeRegForValue(Op0);
1783 else
1784 return false; // fast-isel can't handle aggregate constants at the moment
1786 // Get the actual result register, which is an offset from the base register.
1787 unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
1789 SmallVector<EVT, 4> AggValueVTs;
1790 ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);
1792 for (unsigned i = 0; i < VTIndex; i++)
1793 ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);
1795 updateValueMap(EVI, ResultReg);
1796 return true;
1797 }
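// [Editorial sketch; not part of the original file] The loop above turns an
// extractvalue index into a register offset: for
//   %v = extractvalue {i32, i64, i32} %agg, 2
// the linear index is 2, so the result register is the aggregate's base
// register plus regs(i32) + regs(i64). The same computation over a
// hypothetical per-member register-count table:
static unsigned aggregateRegOffset(const unsigned *RegsPerMember,
                                   unsigned LinearIndex) {
  unsigned Offset = 0;
  for (unsigned i = 0; i < LinearIndex; ++i)
    Offset += RegsPerMember[i]; // skip the registers of earlier members
  return Offset;
}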
1799 bool FastISel::selectOperator(const User *I, unsigned Opcode) {
1800 switch (Opcode) {
1801 case Instruction::Add:
1802 return selectBinaryOp(I, ISD::ADD);
1803 case Instruction::FAdd:
1804 return selectBinaryOp(I, ISD::FADD);
1805 case Instruction::Sub:
1806 return selectBinaryOp(I, ISD::SUB);
1807 case Instruction::FSub:
1808 // FNeg is currently represented in LLVM IR as a special case of FSub.
1809 return selectFNeg(I) || selectBinaryOp(I, ISD::FSUB);
1810 case Instruction::Mul:
1811 return selectBinaryOp(I, ISD::MUL);
1812 case Instruction::FMul:
1813 return selectBinaryOp(I, ISD::FMUL);
1814 case Instruction::SDiv:
1815 return selectBinaryOp(I, ISD::SDIV);
1816 case Instruction::UDiv:
1817 return selectBinaryOp(I, ISD::UDIV);
1818 case Instruction::FDiv:
1819 return selectBinaryOp(I, ISD::FDIV);
1820 case Instruction::SRem:
1821 return selectBinaryOp(I, ISD::SREM);
1822 case Instruction::URem:
1823 return selectBinaryOp(I, ISD::UREM);
1824 case Instruction::FRem:
1825 return selectBinaryOp(I, ISD::FREM);
1826 case Instruction::Shl:
1827 return selectBinaryOp(I, ISD::SHL);
1828 case Instruction::LShr:
1829 return selectBinaryOp(I, ISD::SRL);
1830 case Instruction::AShr:
1831 return selectBinaryOp(I, ISD::SRA);
1832 case Instruction::And:
1833 return selectBinaryOp(I, ISD::AND);
1834 case Instruction::Or:
1835 return selectBinaryOp(I, ISD::OR);
1836 case Instruction::Xor:
1837 return selectBinaryOp(I, ISD::XOR);
1839 case Instruction::GetElementPtr:
1840 return selectGetElementPtr(I);
1842 case Instruction::Br: {
1843 const BranchInst *BI = cast<BranchInst>(I);
1845 if (BI->isUnconditional()) {
1846 const BasicBlock *LLVMSucc = BI->getSuccessor(0);
1847 MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
1848 fastEmitBranch(MSucc, BI->getDebugLoc());
1849 return true;
1850 }
1852 // Conditional branches are not handled yet.
1853 // Halt "fast" selection and bail.
1854 return false;
1855 }
1857 case Instruction::Unreachable:
1858 if (TM.Options.TrapUnreachable)
1859 return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
1860 else
1861 return true;
1863 case Instruction::Alloca:
1864 // FunctionLowering has the static-sized case covered.
1865 if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
1866 return true;
1868 // Dynamic-sized alloca is not handled yet.
1869 return false;
1871 case Instruction::Call:
1872 return selectCall(I);
1874 case Instruction::BitCast:
1875 return selectBitCast(I);
1877 case Instruction::FPToSI:
1878 return selectCast(I, ISD::FP_TO_SINT);
1879 case Instruction::ZExt:
1880 return selectCast(I, ISD::ZERO_EXTEND);
1881 case Instruction::SExt:
1882 return selectCast(I, ISD::SIGN_EXTEND);
1883 case Instruction::Trunc:
1884 return selectCast(I, ISD::TRUNCATE);
1885 case Instruction::SIToFP:
1886 return selectCast(I, ISD::SINT_TO_FP);
1888 case Instruction::IntToPtr: // Deliberate fall-through.
1889 case Instruction::PtrToInt: {
1890 EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
1891 EVT DstVT = TLI.getValueType(DL, I->getType());
1892 if (DstVT.bitsGT(SrcVT))
1893 return selectCast(I, ISD::ZERO_EXTEND);
1894 if (DstVT.bitsLT(SrcVT))
1895 return selectCast(I, ISD::TRUNCATE);
1896 unsigned Reg = getRegForValue(I->getOperand(0));
1897 if (!Reg)
1898 return false;
1899 updateValueMap(I, Reg);
1900 return true;
1901 }
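// [Editor's note, illustrative] E.g. with 64-bit pointers,
// "ptrtoint i8* %p to i32" narrows and is selected as TRUNCATE, while
// "ptrtoint i8* %p to i128" widens and is selected as ZERO_EXTEND;
// equal widths simply reuse the source register.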
1903 case Instruction::ExtractValue:
1904 return selectExtractValue(I);
1906 case Instruction::PHI:
1907 llvm_unreachable("FastISel shouldn't visit PHI nodes!");
1909 default:
1910 // Unhandled instruction. Halt "fast" selection and bail.
1911 return false;
1912 }
1913 }
1915 FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
1916 const TargetLibraryInfo *LibInfo,
1917 bool SkipTargetIndependentISel)
1918 : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
1919 MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
1920 TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
1921 TII(*MF->getSubtarget().getInstrInfo()),
1922 TLI(*MF->getSubtarget().getTargetLowering()),
1923 TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
1924 SkipTargetIndependentISel(SkipTargetIndependentISel) {}
1926 FastISel::~FastISel() = default;
1928 bool FastISel::fastLowerArguments() { return false; }
1930 bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }
1932 bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
1933 return false;
1934 }
1936 unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }
1938 unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
1939 bool /*Op0IsKill*/) {
1940 return 0;
1941 }
1943 unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
1944 bool /*Op0IsKill*/, unsigned /*Op1*/,
1945 bool /*Op1IsKill*/) {
1946 return 0;
1947 }
1949 unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
1950 return 0;
1951 }
1953 unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
1954 const ConstantFP * /*FPImm*/) {
1955 return 0;
1956 }
1958 unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
1959 bool /*Op0IsKill*/, uint64_t /*Imm*/) {
1960 return 0;
1961 }
1963 /// This method is a wrapper of fastEmit_ri. It first tries to emit an
1964 /// instruction with an immediate operand using fastEmit_ri.
1965 /// If that fails, it materializes the immediate into a register and tries
1966 /// fastEmit_rr instead.
1967 unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
1968 bool Op0IsKill, uint64_t Imm, MVT ImmType) {
1969 // If this is a multiply by a power of two, emit this as a shift left.
1970 if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
1971 Opcode = ISD::SHL;
1972 Imm = Log2_64(Imm);
1973 } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
1974 // div x, 8 -> srl x, 3
1975 Opcode = ISD::SRL;
1976 Imm = Log2_64(Imm);
1977 }
1979 // Horrible hack (to be removed): check that shift amounts are
1980 // in range.
1981 if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
1982 Imm >= VT.getSizeInBits())
1983 return 0;
1985 // First check if immediate type is legal. If not, we can't use the ri form.
1986 unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
1987 if (ResultReg)
1988 return ResultReg;
1989 unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
1990 bool IsImmKill = true;
1991 if (!MaterialReg) {
1992 // This is a bit ugly/slow, but failing here means falling out of
1993 // fast-isel, which would be very slow.
1994 IntegerType *ITy =
1995 IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
1996 MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
1997 if (!MaterialReg)
1998 return 0;
1999 // FIXME: If the materialized register here has no uses yet then this
2000 // will be the first use and we should be able to mark it as killed.
2001 // However, the local value area for materialising constant expressions
2002 // grows down, not up, which means that any constant expressions we generate
2003 // later which also use 'Imm' could be after this instruction and therefore
2004 // after this kill.
2005 IsImmKill = false;
2006 }
2007 return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
2008 }
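// [Editorial sketch; not part of the original file] The two rewrites above
// are the usual power-of-two strength reductions. The same transforms in
// plain C++, assuming the MathExtras helpers already used in this file:
static inline uint64_t mulByPow2(uint64_t X, uint64_t Imm) {
  assert(isPowerOf2_64(Imm) && "caller guarantees a power of two");
  return X << Log2_64(Imm); // mul x, 2^k  ==>  shl x, k
}
static inline uint64_t udivByPow2(uint64_t X, uint64_t Imm) {
  assert(isPowerOf2_64(Imm) && "caller guarantees a power of two");
  return X >> Log2_64(Imm); // udiv x, 2^k ==>  srl x, k
}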
2010 unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
2011 return MRI.createVirtualRegister(RC);
2012 }
2014 unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
2015 unsigned OpNum) {
2016 if (TargetRegisterInfo::isVirtualRegister(Op)) {
2017 const TargetRegisterClass *RegClass =
2018 TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
2019 if (!MRI.constrainRegClass(Op, RegClass)) {
2020 // If it's not legal to COPY between the register classes, something
2021 // has gone very wrong before we got here.
2022 unsigned NewOp = createResultReg(RegClass);
2023 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2024 TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
2025 return NewOp;
2026 }
2027 }
2028 return Op;
2029 }
2031 unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
2032 const TargetRegisterClass *RC) {
2033 unsigned ResultReg = createResultReg(RC);
2034 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2036 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
2037 return ResultReg;
2038 }
2040 unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
2041 const TargetRegisterClass *RC, unsigned Op0,
2042 bool Op0IsKill) {
2043 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2045 unsigned ResultReg = createResultReg(RC);
2046 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2048 if (II.getNumDefs() >= 1)
2049 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2050 .addReg(Op0, getKillRegState(Op0IsKill));
2051 else {
2052 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2053 .addReg(Op0, getKillRegState(Op0IsKill));
2054 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2055 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2056 }
2058 return ResultReg;
2059 }
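// [Editor's note] The shape above repeats in every fastEmitInst_* overload
// below: when the MCInstrDesc declares an explicit def, the result register
// is attached directly to the new instruction; when it has only implicit
// defs (an instruction writing a fixed physical register), the value is
// copied out of ImplicitDefs[0] into the fresh virtual register.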
2061 unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
2062 const TargetRegisterClass *RC, unsigned Op0,
2063 bool Op0IsKill, unsigned Op1,
2064 bool Op1IsKill) {
2065 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2067 unsigned ResultReg = createResultReg(RC);
2068 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2069 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2071 if (II.getNumDefs() >= 1)
2072 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2073 .addReg(Op0, getKillRegState(Op0IsKill))
2074 .addReg(Op1, getKillRegState(Op1IsKill));
2075 else {
2076 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2077 .addReg(Op0, getKillRegState(Op0IsKill))
2078 .addReg(Op1, getKillRegState(Op1IsKill));
2079 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2080 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2081 }
2082 return ResultReg;
2083 }
2085 unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
2086 const TargetRegisterClass *RC, unsigned Op0,
2087 bool Op0IsKill, unsigned Op1,
2088 bool Op1IsKill, unsigned Op2,
2089 bool Op2IsKill) {
2090 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2092 unsigned ResultReg = createResultReg(RC);
2093 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2094 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2095 Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);
2097 if (II.getNumDefs() >= 1)
2098 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2099 .addReg(Op0, getKillRegState(Op0IsKill))
2100 .addReg(Op1, getKillRegState(Op1IsKill))
2101 .addReg(Op2, getKillRegState(Op2IsKill));
2102 else {
2103 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2104 .addReg(Op0, getKillRegState(Op0IsKill))
2105 .addReg(Op1, getKillRegState(Op1IsKill))
2106 .addReg(Op2, getKillRegState(Op2IsKill));
2107 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2108 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2109 }
2110 return ResultReg;
2111 }
2113 unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
2114 const TargetRegisterClass *RC, unsigned Op0,
2115 bool Op0IsKill, uint64_t Imm) {
2116 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2118 unsigned ResultReg = createResultReg(RC);
2119 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2121 if (II.getNumDefs() >= 1)
2122 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2123 .addReg(Op0, getKillRegState(Op0IsKill))
2124 .addImm(Imm);
2125 else {
2126 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2127 .addReg(Op0, getKillRegState(Op0IsKill))
2128 .addImm(Imm);
2129 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2130 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2131 }
2132 return ResultReg;
2133 }
2135 unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
2136 const TargetRegisterClass *RC, unsigned Op0,
2137 bool Op0IsKill, uint64_t Imm1,
2138 uint64_t Imm2) {
2139 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2141 unsigned ResultReg = createResultReg(RC);
2142 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2144 if (II.getNumDefs() >= 1)
2145 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2146 .addReg(Op0, getKillRegState(Op0IsKill))
2147 .addImm(Imm1)
2148 .addImm(Imm2);
2149 else {
2150 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2151 .addReg(Op0, getKillRegState(Op0IsKill))
2152 .addImm(Imm1)
2153 .addImm(Imm2);
2154 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2155 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2156 }
2157 return ResultReg;
2158 }
2160 unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
2161 const TargetRegisterClass *RC,
2162 const ConstantFP *FPImm) {
2163 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2165 unsigned ResultReg = createResultReg(RC);
2167 if (II.getNumDefs() >= 1)
2168 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2169 .addFPImm(FPImm);
2170 else {
2171 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2172 .addFPImm(FPImm);
2173 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2174 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2175 }
2176 return ResultReg;
2177 }
2179 unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
2180 const TargetRegisterClass *RC, unsigned Op0,
2181 bool Op0IsKill, unsigned Op1,
2182 bool Op1IsKill, uint64_t Imm) {
2183 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2185 unsigned ResultReg = createResultReg(RC);
2186 Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
2187 Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
2189 if (II.getNumDefs() >= 1)
2190 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2191 .addReg(Op0, getKillRegState(Op0IsKill))
2192 .addReg(Op1, getKillRegState(Op1IsKill))
2193 .addImm(Imm);
2194 else {
2195 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
2196 .addReg(Op0, getKillRegState(Op0IsKill))
2197 .addReg(Op1, getKillRegState(Op1IsKill))
2198 .addImm(Imm);
2199 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2200 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2201 }
2202 return ResultReg;
2203 }
2205 unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
2206 const TargetRegisterClass *RC, uint64_t Imm) {
2207 unsigned ResultReg = createResultReg(RC);
2208 const MCInstrDesc &II = TII.get(MachineInstOpcode);
2210 if (II.getNumDefs() >= 1)
2211 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
2212 .addImm(Imm);
2213 else {
2214 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
2215 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
2216 TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
2217 }
2218 return ResultReg;
2219 }
2221 unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
2222 bool Op0IsKill, uint32_t Idx) {
2223 unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
2224 assert(TargetRegisterInfo::isVirtualRegister(Op0) &&
2225 "Cannot yet extract from physregs");
2226 const TargetRegisterClass *RC = MRI.getRegClass(Op0);
2227 MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
2228 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
2229 ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
2230 return ResultReg;
2231 }
2233 /// Emit MachineInstrs to compute the value of Op with all but the least
2234 /// significant bit set to zero.
2235 unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
2236 return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
2237 }
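// [Editorial sketch; not part of the original file] Zero-extending from i1
// only has to clear the upper bits, so a single AND against 1 suffices
// (assumes <cstdint>):
static inline uint32_t zextFromI1(uint32_t RegVal) {
  return RegVal & 1u; // keep the low bit, zero everything above it
}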
2239 /// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
2240 /// Emit code to ensure constants are copied into registers when needed.
2241 /// Remember the virtual registers that need to be added to the Machine PHI
2242 /// nodes as input. We cannot just directly add them, because expansion
2243 /// might result in multiple MBB's for one BB. As such, the start of the
2244 /// BB might correspond to a different MBB than the end.
2245 bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
2246 const Instruction *TI = LLVMBB->getTerminator();
2248 SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
2249 FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();
2251 // Check successor nodes' PHI nodes that expect a constant to be available
2252 // from this block.
2253 for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
2254 const BasicBlock *SuccBB = TI->getSuccessor(succ);
2255 if (!isa<PHINode>(SuccBB->begin()))
2256 continue;
2257 MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];
2259 // If this terminator has multiple identical successors (common for
2260 // switches), only handle each succ once.
2261 if (!SuccsHandled.insert(SuccMBB).second)
2262 continue;
2264 MachineBasicBlock::iterator MBBI = SuccMBB->begin();
2266 // At this point we know that there is a 1-1 correspondence between LLVM PHI
2267 // nodes and Machine PHI nodes, but the incoming operands have not been
2268 // emitted yet.
2269 for (const PHINode &PN : SuccBB->phis()) {
2270 // Ignore dead PHI nodes.
2271 if (PN.use_empty())
2272 continue;
2274 // Only handle legal types. Two interesting things to note here. First,
2275 // by bailing out early, we may leave behind some dead instructions,
2276 // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
2277 // own moves. Second, this check is necessary because FastISel doesn't
2278 // use CreateRegs to create registers, so it always creates
2279 // exactly one register for each non-void instruction.
2280 EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
2281 if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
2282 // Handle integer promotions, though, because they're common and easy.
2283 if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
2284 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2285 return false;
2286 }
2287 }
2289 const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);
2291 // Set the DebugLoc for the copy. Prefer the location of the operand
2292 // if there is one; use the location of the PHI otherwise.
2293 DbgLoc = PN.getDebugLoc();
2294 if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
2295 DbgLoc = Inst->getDebugLoc();
2297 unsigned Reg = getRegForValue(PHIOp);
2298 if (!Reg) {
2299 FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
2300 return false;
2301 }
2302 FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
2303 DbgLoc = DebugLoc();
2304 }
2305 }
2307 return true;
2308 }
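// [Editor's note, illustrative] Example of the case handled above: a
// successor PHI such as
//   %phi = phi i32 [ 7, %thisbb ], [ %x, %otherbb ]
// needs the constant 7 materialized in a register at the end of %thisbb.
// The (machine PHI, vreg) pair is queued in PHINodesToUpdate rather than
// written immediately, because one IR block may expand to several MBBs.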
2310 bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
2311 assert(LI->hasOneUse() &&
2312 "tryToFoldLoad expected a LoadInst with a single use");
2313 // We know that the load has a single use, but don't know what it is. If it
2314 // isn't one of the folded instructions, then we can't succeed here. Handle
2315 // this by scanning the single-use users of the load until we get to FoldInst.
2316 unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.
2318 const Instruction *TheUser = LI->user_back();
2319 while (TheUser != FoldInst && // Scan up until we find FoldInst.
2320 // Stay in the right block.
2321 TheUser->getParent() == FoldInst->getParent() &&
2322 --MaxUsers) { // Don't scan too far.
2323 // If there are multiple or no uses of this instruction, then bail out.
2324 if (!TheUser->hasOneUse())
2325 return false;
2327 TheUser = TheUser->user_back();
2328 }
2330 // If we didn't find the fold instruction, then we failed to collapse the
2331 // sequence.
2332 if (TheUser != FoldInst)
2333 return false;
2335 // Don't try to fold volatile loads. Target has to deal with alignment
2336 // constraints.
2337 if (LI->isVolatile())
2338 return false;
2340 // Figure out which vreg this is going into. If there is no assigned vreg yet
2341 // then there actually was no reference to it. Perhaps the load is referenced
2342 // by a dead instruction.
2343 unsigned LoadReg = getRegForValue(LI);
2344 if (!LoadReg)
2345 return false;
2347 // We can't fold if this vreg has no uses or more than one use. Multiple uses
2348 // may mean that the instruction got lowered to multiple MIs, or the use of
2349 // the loaded value ended up being multiple operands of the result.
2350 if (!MRI.hasOneUse(LoadReg))
2351 return false;
2353 MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
2354 MachineInstr *User = RI->getParent();
2356 // Set the insertion point properly. Folding the load can cause generation of
2357 // other random instructions (like sign extends) for addressing modes; make
2358 // sure they get inserted in a logical place before the new instruction.
2359 FuncInfo.InsertPt = User;
2360 FuncInfo.MBB = User->getParent();
2362 // Ask the target to try folding the load.
2363 return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
2364 }
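// [Editor's note, illustrative] A typical chain the walk above accepts:
//   %v = load i32, i32* %p
//   %s = sext i32 %v to i64
//   %u = add i64 %s, %y   ; FoldInst
// Each link has exactly one use and stays in the same block, so the load
// can be offered to the target for folding into the user's addressing mode.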
2366 bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
2367 // Must be an add.
2368 if (!isa<AddOperator>(Add))
2369 return false;
2370 // Type size needs to match.
2371 if (DL.getTypeSizeInBits(GEP->getType()) !=
2372 DL.getTypeSizeInBits(Add->getType()))
2373 return false;
2374 // Must be in the same basic block.
2375 if (isa<Instruction>(Add) &&
2376 FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
2377 return false;
2378 // Must have a constant operand.
2379 return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
2380 }
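// [Editor's note, illustrative] E.g. "%a = add i64 %base, 16" feeding a GEP
// of the same bit width, defined in the same block, with its constant as the
// second operand, passes all four checks above.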
2382 MachineMemOperand *
2383 FastISel::createMachineMemOperandFor(const Instruction *I) const {
2384 const Value *Ptr;
2385 Type *ValTy;
2386 unsigned Alignment;
2387 MachineMemOperand::Flags Flags;
2388 bool IsVolatile;
2390 if (const auto *LI = dyn_cast<LoadInst>(I)) {
2391 Alignment = LI->getAlignment();
2392 IsVolatile = LI->isVolatile();
2393 Flags = MachineMemOperand::MOLoad;
2394 Ptr = LI->getPointerOperand();
2395 ValTy = LI->getType();
2396 } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
2397 Alignment = SI->getAlignment();
2398 IsVolatile = SI->isVolatile();
2399 Flags = MachineMemOperand::MOStore;
2400 Ptr = SI->getPointerOperand();
2401 ValTy = SI->getValueOperand()->getType();
2402 } else
2403 return nullptr;
2405 bool IsNonTemporal = I->getMetadata(LLVMContext::MD_nontemporal) != nullptr;
2406 bool IsInvariant = I->getMetadata(LLVMContext::MD_invariant_load) != nullptr;
2407 bool IsDereferenceable =
2408 I->getMetadata(LLVMContext::MD_dereferenceable) != nullptr;
2409 const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);
2411 AAMDNodes AAInfo;
2412 I->getAAMetadata(AAInfo);
2414 if (Alignment == 0) // Ensure that codegen never sees alignment 0.
2415 Alignment = DL.getABITypeAlignment(ValTy);
2417 unsigned Size = DL.getTypeStoreSize(ValTy);
2419 if (IsVolatile)
2420 Flags |= MachineMemOperand::MOVolatile;
2421 if (IsNonTemporal)
2422 Flags |= MachineMemOperand::MONonTemporal;
2423 if (IsDereferenceable)
2424 Flags |= MachineMemOperand::MODereferenceable;
2425 if (IsInvariant)
2426 Flags |= MachineMemOperand::MOInvariant;
2428 return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
2429 Alignment, AAInfo, Ranges);
2430 }
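// [Editorial sketch; not part of the original file] The flag assembly above,
// gathered in one place; parameter names are illustrative:
static MachineMemOperand::Flags
composeMemOpFlags(bool IsStore, bool IsVolatile, bool IsNonTemporal,
                  bool IsDereferenceable, bool IsInvariant) {
  MachineMemOperand::Flags F =
      IsStore ? MachineMemOperand::MOStore : MachineMemOperand::MOLoad;
  if (IsVolatile)
    F |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    F |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    F |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    F |= MachineMemOperand::MOInvariant;
  return F;
}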
2432 CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
2433 // If both operands are the same, then try to optimize or fold the cmp.
2434 CmpInst::Predicate Predicate = CI->getPredicate();
2435 if (CI->getOperand(0) != CI->getOperand(1))
2436 return Predicate;
2438 switch (Predicate) {
2439 default: llvm_unreachable("Invalid predicate!");
2440 case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
2441 case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
2442 case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
2443 case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
2444 case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
2445 case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
2446 case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
2447 case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
2448 case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
2449 case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
2450 case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
2451 case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2452 case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
2453 case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2454 case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
2455 case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;
2457 case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
2458 case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
2459 case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
2460 case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
2461 case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
2462 case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
2463 case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
2464 case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
2465 case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
2466 case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
2467 }
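// [Editor's note] With identical operands the FP predicates collapse to NaN
// checks: "x oeq x" holds exactly when x is ordered (hence FCMP_ORD) and
// "x une x" holds exactly when x is NaN (hence FCMP_UNO), while the integer
// predicates fold directly to FCMP_TRUE or FCMP_FALSE.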
2469 return Predicate;