//===- FastISel.cpp - Implementation of the FastISel class ---------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
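// For example (illustrative): "add i32 %x, 42" can usually be selected
// straight to an add-with-immediate instruction instead of first
// materializing 42 in a register; the exact opcode is target-specific.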
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//
#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
                                     cl::init(true), cl::Hidden,
                                     cl::desc("Sink local values in FastISel"));

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fall back to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, unsigned>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static unsigned findSinkableLocalRegDef(MachineInstr &MI) {
  unsigned RegDef = 0;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return 0;
      RegDef = MO.getReg();
    } else if (Register::isVirtualRegister(MO.getReg())) {
      // This is another use of a vreg. Don't try to sink it.
      return 0;
    }
  }
  return RegDef;
}

void FastISel::flushLocalValueMap() {
  // Try to sink local values down to their first use so that we can give them a
  // better debug location. This has the side effect of shrinking local value
  // live ranges, which helps out fast regalloc.
  if (SinkLocalValues && LastLocalValue != EmitStartPt) {
    // Sink local value materialization instructions between EmitStartPt and
    // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
    // avoid inserting into the range that we're iterating over.
    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);

    InstOrderMap OrderMap;
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      ++RI;
      bool Store = true;
      if (!LocalMI.isSafeToMove(nullptr, Store))
        continue;
      unsigned DefReg = findSinkableLocalRegDef(LocalMI);
      if (DefReg == 0)
        continue;

      sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
  LastFlushPoint = FuncInfo.InsertPt;
}

static bool isRegUsedByPhiNodes(unsigned DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
  unsigned Order = 0;
  for (MachineInstr &I : *MBB) {
    if (!FirstTerminator &&
        (I.isTerminator() || (I.isEHLabel() && &I != &MBB->front()))) {
      FirstTerminator = &I;
      FirstTerminatorOrder = Order;
    }
    Orders[&I] = Order++;

    // We don't need to order instructions past the last flush point.
    if (I.getIterator() == LastFlushPoint)
      break;
  }
}

void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
                                             unsigned DefReg,
                                             InstOrderMap &OrderMap) {
  // If this register is used by a register fixup, MRI will not contain all
  // the uses until after register fixups, so don't attempt to sink or DCE
  // this instruction. Register fixups typically come from no-op cast
  // instructions, which replace the cast instruction vreg with the local
  // value vreg.
  if (FuncInfo.RegsWithFixups.count(DefReg))
    return;

  // We can DCE this instruction if there are no uses and it wasn't
  // materialized for a successor PHI node.
  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
    if (EmitStartPt == &LocalMI)
      EmitStartPt = EmitStartPt->getPrevNode();
    LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                      << LocalMI);
    OrderMap.Orders.erase(&LocalMI);
    LocalMI.eraseFromParent();
    return;
  }

  // Number the instructions if we haven't yet so we can efficiently find the
  // earliest use.
  if (OrderMap.Orders.empty())
    OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);

  // Find the first user in the BB.
  MachineInstr *FirstUser = nullptr;
  unsigned FirstOrder = std::numeric_limits<unsigned>::max();
  for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
    auto I = OrderMap.Orders.find(&UseInst);
    assert(I != OrderMap.Orders.end() &&
           "local value used by instruction outside local region");
    unsigned UseOrder = I->second;
    if (UseOrder < FirstOrder) {
      FirstOrder = UseOrder;
      FirstUser = &UseInst;
    }
  }

  // The insertion point will be the first terminator or the first user,
  // whichever came first. If there was no terminator, this must be a
  // fallthrough block and the insertion point is the end of the block.
  MachineBasicBlock::instr_iterator SinkPos;
  if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
    FirstOrder = OrderMap.FirstTerminatorOrder;
    SinkPos = OrderMap.FirstTerminator->getIterator();
  } else if (FirstUser) {
    SinkPos = FirstUser->getIterator();
  } else {
    assert(UsedByPHI && "must be users if not used by a phi");
    SinkPos = FuncInfo.MBB->instr_end();
  }

  // Collect all DBG_VALUEs before the new insertion position so that we can
  // sink them.
  SmallVector<MachineInstr *, 1> DbgValues;
  for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
    if (!DbgVal.isDebugValue())
      continue;
    unsigned UseOrder = OrderMap.Orders[&DbgVal];
    if (UseOrder < FirstOrder)
      DbgValues.push_back(&DbgVal);
  }

  // Sink LocalMI before SinkPos and assign it the same DebugLoc.
  LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
  FuncInfo.MBB->remove(&LocalMI);
  FuncInfo.MBB->insert(SinkPos, &LocalMI);
  if (SinkPos != FuncInfo.MBB->end())
    LocalMI.setDebugLoc(SinkPos->getDebugLoc());

  // Sink any debug values that we've collected.
  for (MachineInstr *DI : DbgValues) {
    FuncInfo.MBB->remove(DI);
    FuncInfo.MBB->insert(SinkPos, DI);
  }
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, it is possible that
  // FastISel might fold the use into another instruction and now there is more
  // than one use at the Machine Instruction level.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

unsigned FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return 0;

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return 0;
  }

  // Look up the value to see if we already have a register for it.
  unsigned Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

unsigned FastISel::materializeConstant(const Value *V, MVT VT) {
  unsigned Reg = 0;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg = getRegForValue(
        Constant::getNullValue(DL.getIntPtrType(V->getContext())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
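      // For example (illustrative): 2.0 can be rebuilt by materializing the
      // integer 2 and converting it with SINT_TO_FP, as long as the
      // round-trip through the integer value is exact.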
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        unsigned IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg != 0)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Kill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
unsigned FastISel::materializeRegForValue(const Value *V, MVT VT) {
  unsigned Reg = 0;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

unsigned FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, unsigned Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  unsigned &AssignedReg = FuncInfo.ValueMap[I];
  if (AssignedReg == 0)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

std::pair<unsigned, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  unsigned IdxN = getRegForValue(Idx);
  if (IdxN == 0)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<unsigned, bool>(0, false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
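  // For example (illustrative): an i32 index used with 64-bit pointers is
  // sign-extended to i64 here before it is scaled and added.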
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<unsigned, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (LastFlushPoint == I)
      LastFlushPoint = E;
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      unsigned Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      unsigned ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  unsigned Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  unsigned ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  unsigned N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
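  // For example (illustrative): a GEP with several constant indices folds
  // into one ADD of the combined byte offset rather than one ADD per index.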
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<unsigned, bool> Pair = getRegForGEPIndex(Idx);
      unsigned IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      unsigned Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap doesn't
  // clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END.
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  ImmutableCallSite CS(CI);
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC)
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      unsigned Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, which gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, which gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  ImmutableCallSite CS(CI);

  FunctionType *FTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(&CS, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CS.getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), CS, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling in
      // the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize =
          DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      unsigned FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = TLI.getByValTypeAlignment(ElementTy, DL);
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    unsigned OriginalAlignment = DL.getABITypeAlignment(Arg.Ty);
    Flags.setOrigAlign(OriginalAlignment);

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CS)
    updateValueMap(CLI.CS->getInstruction(), CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CS && CLI.CS->getInstruction()->hasMetadata("heapallocsite")) {
    const MDNode *MD = CLI.CS->getInstruction()->getMetadata("heapallocsite");
    MF->addCodeViewHeapAllocSite(CLI.Call, MD);
  }

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  ImmutableCallSite CS(CI);

  FunctionType *FuncTy = CS.getFunctionType();
  Type *RetTy = CS.getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CS.arg_size());

  for (ImmutableCallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
       i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(&CS, i - CS.arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(CS, TM))
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledValue(), std::move(Args), CS)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledValue())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::INLINEASM))
        .addExternalSymbol(IA->getAsmString().c_str())
        .addImm(ExtraInfo);
    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value,
  // make an unrelated function call and use the value, because
  // it tends to be spilled on the stack. So, we move the pointer
  // to the last local value to the beginning of the block, so that
  // all the values which have already been materialized
  // appear after the call. It also makes sense to skip intrinsics
  // since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its operand.
  case Intrinsic::assume:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (unsigned Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
              *Op, DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging. Probably the optimizer should not do this.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (unsigned Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getVariable(), DI->getExpression());
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize: {
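    // Illustrative note: at -O0 this simply folds llvm.objectsize to its
    // conservative default -- -1 ("unknown") when the 'min' argument is
    // false, or 0 when it is true -- instead of analyzing the object.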
    ConstantInt *CI = cast<ConstantInt>(II->getArgOperand(1));
    unsigned long long Res = CI->isZero() ? -1ULL : 0;
    Constant *ResCI = ConstantInt::get(II->getType(), Res);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::is_constant: {
    Constant *ResCI = ConstantInt::get(II->getType(), 0);
    unsigned ResultReg = getRegForValue(ResCI);
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    unsigned ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  unsigned InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  unsigned ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  unsigned Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  unsigned ResultReg = 0;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

1599 bool FastISel::selectInstruction(const Instruction
*I
) {
1600 MachineInstr
*SavedLastLocalValue
= getLastLocalValue();
1601 // Just before the terminator instruction, insert instructions to
1602 // feed PHI nodes in successor blocks.
1603 if (I
->isTerminator()) {
1604 if (!handlePHINodesInSuccessorBlocks(I
->getParent())) {
1605 // PHI node handling may have generated local value instructions,
1606 // even though it failed to handle all PHI nodes.
1607 // We remove these instructions because SelectionDAGISel will generate
1609 removeDeadLocalValueCode(SavedLastLocalValue
);
1614 // FastISel does not handle any operand bundles except OB_funclet.
1615 if (ImmutableCallSite CS
= ImmutableCallSite(I
))
1616 for (unsigned i
= 0, e
= CS
.getNumOperandBundles(); i
!= e
; ++i
)
1617 if (CS
.getOperandBundleAt(i
).getTagID() != LLVMContext::OB_funclet
)
1620 DbgLoc
= I
->getDebugLoc();
1622 SavedInsertPt
= FuncInfo
.InsertPt
;
1624 if (const auto *Call
= dyn_cast
<CallInst
>(I
)) {
1625 const Function
*F
= Call
->getCalledFunction();
1628 // As a special case, don't handle calls to builtin library functions that
1629 // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}
/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // For more accurate line information if this is the only non-debug
    // instruction in the block then emit it, otherwise we have the
    // unconditional fall-through case, which needs no instructions.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as successor unless it is equal to the FalseMBB: This can
  // happen in degenerate IR and MachineIR forbids having a block twice in the
  // successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}
/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  unsigned OpReg = getRegForValue(In);
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(In);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  unsigned ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }
  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
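  // (For example, for an f64 operand this XORs the i64 bit pattern with
  // 1 << 63, flipping only the IEEE-754 sign bit.)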
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  unsigned IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  unsigned IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, unsigned>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());
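  // (Illustrative example: for an aggregate of type { i32, i64 } and index 1,
  // VTIndex is 1, so ResultReg is advanced past the registers holding the
  // i32 field in the loop below.)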
  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}
bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub: {
    // FNeg is currently represented in LLVM IR as a special case of FSub.
    Value *X;
    if (match(I, m_FNeg(m_Value(X))))
      return selectFNeg(I, X);
    return selectBinaryOp(I, ISD::FSUB);
  }
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, call lowering uses the DAG-ISEL path currently so that the
    // callee of the direct function call instruction will be mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    if (TM.getTargetTriple().isOSAIX())
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
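    // Otherwise the pointer and the integer have the same width, so the
    // operand's register can simply be reused for the result.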
    unsigned Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}
FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel) {}
FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}
unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}
/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
unsigned FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
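    // For example, "mul x, 16" is emitted here as "shl x, 4".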
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  unsigned ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  unsigned MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materialising constant expressions
    // grows down, not up, which means that any constant expressions we generate
    // later which also use 'Imm' could be after this instruction and therefore
    // after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}
unsigned FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}
unsigned FastISel::constrainOperandRegClass(const MCInstrDesc &II, unsigned Op,
                                            unsigned OpNum) {
  if (Register::isVirtualRegister(Op)) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      unsigned NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}
unsigned FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}
unsigned FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}
unsigned FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  unsigned ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  unsigned ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}
unsigned FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  unsigned ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}
/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
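/// In effect this emits "and Op0, 1", which normalizes an i1 held in a wider
/// register to either 0 or 1.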
unsigned FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}
/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
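/// (For example, an incoming PHI value that is a ConstantInt must be
/// materialized into a virtual register in this predecessor block before its
/// terminator is selected.)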
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM PHI
    // nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead phi's.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN.getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      unsigned Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
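  // (Illustrative example: the load's only user might be a sign-extend whose
  // only user is FoldInst; the walk below follows exactly that kind of chain.)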
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  unsigned LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  unsigned Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlignment();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlignment();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (Alignment == 0) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlignment(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           Alignment, AAInfo, Ranges);
}
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;
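  // With identical operands only the ordered/unordered aspect of an FP compare
  // matters; for example, "fcmp oeq x, x" behaves like "fcmp ord x, x" (true
  // unless x is NaN), while integer compares fold to a constant outright.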
  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;