//===- SelectionDAGBuilder.h - Selection-DAG building -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This implements routines for translating from LLVM IR into SelectionDAG IR.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#define LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H
#include "StatepointLowering.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/CodeGenCommonISel.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/SwitchLoweringUtils.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/BranchProbability.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>
#include <vector>

namespace llvm {

class AtomicCmpXchgInst;
class CatchReturnInst;
class CatchSwitchInst;
class CleanupReturnInst;
class ConstrainedFPIntrinsic;
class DILocalVariable;
class FunctionLoweringInfo;
class GCStatepointInst;
class MachineBasicBlock;
class SwiftErrorValueTracking;
class TargetLibraryInfo;
class UnreachableInst;

//===----------------------------------------------------------------------===//
/// SelectionDAGBuilder - This is the common target-independent lowering
/// implementation that is parameterized by a TargetLowering object.
///
class SelectionDAGBuilder {
  /// The current instruction being visited.
  const Instruction *CurInst = nullptr;

  DenseMap<const Value *, SDValue> NodeMap;

  /// Maps argument values for unused arguments. This is used
  /// to preserve debug information for incoming arguments.
  DenseMap<const Value *, SDValue> UnusedArgNodeMap;

  /// Helper type for DanglingDebugInfoMap.
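  /// Records a dbg.value whose operand has not yet been lowered to an
  /// SDValue, together with its DebugLoc and SDNodeOrder, so the debug value
  /// can be emitted once the operand becomes available.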
  class DanglingDebugInfo {
    const DbgValueInst *DI = nullptr;
    DebugLoc dl;
    unsigned SDNodeOrder = 0;

  public:
    DanglingDebugInfo() = default;
    DanglingDebugInfo(const DbgValueInst *di, DebugLoc DL, unsigned SDNO)
        : DI(di), dl(std::move(DL)), SDNodeOrder(SDNO) {}

    const DbgValueInst *getDI() { return DI; }
    DebugLoc getdl() { return dl; }
    unsigned getSDNodeOrder() { return SDNodeOrder; }
  };

  /// Helper type for DanglingDebugInfoMap.
  typedef std::vector<DanglingDebugInfo> DanglingDebugInfoVector;

  /// Keeps track of dbg_values for which we have not yet seen the referent.
  /// We defer handling these until we do see it.
  MapVector<const Value *, DanglingDebugInfoVector> DanglingDebugInfoMap;

public:
  /// Loads are not emitted to the program immediately. We bunch them up and
  /// then emit token factor nodes when possible. This allows us to get simple
  /// disambiguation between loads without worrying about alias analysis.
  SmallVector<SDValue, 8> PendingLoads;

  /// State used while lowering a statepoint sequence (gc_statepoint,
  /// gc_relocate, and gc_result). See StatepointLowering.hpp/cpp for details.
  StatepointLoweringState StatepointLowering;

private:
  /// CopyToReg nodes that copy values to virtual registers for export to other
  /// blocks need to be emitted before any terminator instruction, but they have
  /// no other ordering requirements. We bunch them up and then emit a single
  /// token factor for them just before terminator instructions.
  SmallVector<SDValue, 8> PendingExports;

  /// Similar to loads, nodes corresponding to constrained FP intrinsics are
  /// bunched up and emitted when necessary. These can be moved across each
  /// other and any (normal) memory operation (load or store), but not across
  /// calls or instructions having unspecified side effects. As a special
  /// case, constrained FP intrinsics using fpexcept.strict may not be deleted
  /// even if otherwise unused, so they need to be chained before any
  /// terminator instruction (like PendingExports). We track the latter
  /// set of nodes in a separate list.
  SmallVector<SDValue, 8> PendingConstrainedFP;
  SmallVector<SDValue, 8> PendingConstrainedFPStrict;
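
  // The pending lists above are merged back into the DAG's root chain by the
  // getRoot/getMemoryRoot/getControlRoot helpers declared below.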

  /// Update root to include all chains from the Pending list.
  SDValue updateRoot(SmallVectorImpl<SDValue> &Pending);

  /// A unique monotonically increasing number used to order the SDNodes we
  /// create.
  unsigned SDNodeOrder;

  /// Determine the rank by weight of CC in [First,Last]. If CC has more weight
  /// than each cluster in the range, its rank is 0.
  unsigned caseClusterRank(const SwitchCG::CaseCluster &CC,
                           SwitchCG::CaseClusterIt First,
                           SwitchCG::CaseClusterIt Last);

  /// Emit comparison and split W into two subtrees.
  void splitWorkItem(SwitchCG::SwitchWorkList &WorkList,
                     const SwitchCG::SwitchWorkListItem &W, Value *Cond,
                     MachineBasicBlock *SwitchMBB);

  void lowerWorkItem(SwitchCG::SwitchWorkListItem W, Value *Cond,
                     MachineBasicBlock *SwitchMBB,
                     MachineBasicBlock *DefaultMBB);

  /// Peel the top probability case if it exceeds the threshold.
  MachineBasicBlock *
  peelDominantCaseCluster(const SwitchInst &SI,
                          SwitchCG::CaseClusterVector &Clusters,
                          BranchProbability &PeeledCaseProb);

private:
  const TargetMachine &TM;

public:
  /// Lowest valid SDNodeOrder. The special case 0 is reserved for scheduling
  /// nodes without a corresponding SDNode.
  static const unsigned LowestSDNodeOrder = 1;

  SelectionDAG &DAG;
  AAResults *AA = nullptr;
  const TargetLibraryInfo *LibInfo;

  class SDAGSwitchLowering : public SwitchCG::SwitchLowering {
  public:
    SDAGSwitchLowering(SelectionDAGBuilder *sdb, FunctionLoweringInfo &funcinfo)
        : SwitchCG::SwitchLowering(funcinfo), SDB(sdb) {}

    virtual void addSuccessorWithProb(
        MachineBasicBlock *Src, MachineBasicBlock *Dst,
        BranchProbability Prob = BranchProbability::getUnknown()) override {
      SDB->addSuccessorWithProb(Src, Dst, Prob);
    }

  private:
    SelectionDAGBuilder *SDB;
  };

  // Data related to deferred switch lowerings. Used to construct additional
  // Basic Blocks in SelectionDAGISel::FinishBasicBlock.
  std::unique_ptr<SDAGSwitchLowering> SL;

  /// A StackProtectorDescriptor structure used to communicate stack protector
  /// information in between SelectBasicBlock and FinishBasicBlock.
  StackProtectorDescriptor SPDescriptor;

  // Emit PHI-node-operand constants only once even if used by multiple
  // PHI nodes.
  DenseMap<const Constant *, unsigned> ConstantsOut;

  /// Information about the function as a whole.
  FunctionLoweringInfo &FuncInfo;

  /// Information about the swifterror values used throughout the function.
  SwiftErrorValueTracking &SwiftError;

  /// Garbage collection metadata for the function.
  GCFunctionInfo *GFI;

  /// Map a landing pad to the call site indexes.
  DenseMap<MachineBasicBlock *, SmallVector<unsigned, 4>> LPadToCallSiteMap;

  /// This is set to true if a call in the current block has been translated as
  /// a tail call. In this case, no subsequent DAG nodes should be created.
  bool HasTailCall = false;

  LLVMContext *Context;

  SelectionDAGBuilder(SelectionDAG &dag, FunctionLoweringInfo &funcinfo,
                      SwiftErrorValueTracking &swifterror, CodeGenOpt::Level ol)
      : SDNodeOrder(LowestSDNodeOrder), TM(dag.getTarget()), DAG(dag),
        SL(std::make_unique<SDAGSwitchLowering>(this, funcinfo)),
        FuncInfo(funcinfo), SwiftError(swifterror) {}
  void init(GCFunctionInfo *gfi, AAResults *AA,
            const TargetLibraryInfo *li);

  /// Clear out the current SelectionDAG and the associated state and prepare
  /// this SelectionDAGBuilder object to be used for a new block. This doesn't
  /// clear out information about additional blocks that are needed to complete
  /// switch lowering or PHI node updating; that information is cleared out as
  /// it is consumed.
  void clear();

  /// Clear the dangling debug information map. This function is separated from
  /// the clear so that debug information that is dangling in a basic block can
  /// be properly resolved in a different basic block. This allows the
  /// SelectionDAG to resolve dangling debug information attached to PHI nodes.
  void clearDanglingDebugInfo();

  /// Return the current virtual root of the Selection DAG, flushing any
  /// PendingLoad items. This must be done before emitting a store or any other
  /// memory node that may need to be ordered after any prior load instructions.
  SDValue getMemoryRoot();

  /// Similar to getMemoryRoot, but also flushes PendingConstrainedFP(Strict)
  /// items. This must be done before emitting any call or any other node that
  /// may need to be ordered after FP instructions due to other side effects.
  SDValue getRoot();

  /// Similar to getRoot, but instead of flushing all the PendingLoad items,
  /// flush all the PendingExports (and PendingConstrainedFPStrict) items.
  /// It is necessary to do this before emitting a terminator instruction.
  SDValue getControlRoot();

  SDLoc getCurSDLoc() const {
    return SDLoc(CurInst, SDNodeOrder);
  }

  DebugLoc getCurDebugLoc() const {
    return CurInst ? CurInst->getDebugLoc() : DebugLoc();
  }

  void CopyValueToVirtualRegister(const Value *V, unsigned Reg);

  void visit(const Instruction &I);

  void visit(unsigned Opcode, const User &I);

  /// If there was a virtual register allocated for the value V, emit
  /// CopyFromReg of the specified type Ty. Return empty SDValue() otherwise.
  SDValue getCopyFromRegs(const Value *V, Type *Ty);

  /// Register a dbg_value which relies on a Value which we have not yet seen.
  void addDanglingDebugInfo(const DbgValueInst *DI, DebugLoc DL,
                            unsigned Order);

  /// If we have dangling debug info that describes \p Variable, or an
  /// overlapping part of variable considering the \p Expr, then this method
  /// will drop that debug info as it isn't valid any longer.
  void dropDanglingDebugInfo(const DILocalVariable *Variable,
                             const DIExpression *Expr);

  /// If we saw an earlier dbg_value referring to V, generate the debug data
  /// structures now that we've seen its definition.
  void resolveDanglingDebugInfo(const Value *V, SDValue Val);

  /// For the given dangling debuginfo record, perform last-ditch efforts to
  /// resolve the debuginfo to something that is represented in this DAG. If
  /// this cannot be done, produce an Undef debug value record.
  void salvageUnresolvedDbgValue(DanglingDebugInfo &DDI);

  /// For a given list of Values, attempt to create and record a SDDbgValue in
  /// the SelectionDAG.
  bool handleDebugValue(ArrayRef<const Value *> Values, DILocalVariable *Var,
                        DIExpression *Expr, DebugLoc CurDL, DebugLoc InstDL,
                        unsigned Order, bool IsVariadic);

  /// Evict any dangling debug information, attempting to salvage it first.
  void resolveOrClearDbgInfo();
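
  /// Return an SDValue for the given Value.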
  SDValue getValue(const Value *V);
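
  /// Return an SDValue for the given Value, but don't look in
  /// FuncInfo.ValueMap for a virtual register.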
  SDValue getNonRegisterValue(const Value *V);
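
  /// Helper for getValue and getNonRegisterValue: create an SDValue for the
  /// given Value.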
  SDValue getValueImpl(const Value *V);

  void setValue(const Value *V, SDValue NewN) {
    SDValue &N = NodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void setUnusedArgValue(const Value *V, SDValue NewN) {
    SDValue &N = UnusedArgNodeMap[V];
    assert(!N.getNode() && "Already set a value for this node!");
    N = NewN;
  }

  void FindMergedConditions(const Value *Cond, MachineBasicBlock *TBB,
                            MachineBasicBlock *FBB, MachineBasicBlock *CurBB,
                            MachineBasicBlock *SwitchBB,
                            Instruction::BinaryOps Opc, BranchProbability TProb,
                            BranchProbability FProb, bool InvertCond);
  void EmitBranchForMergedCondition(const Value *Cond, MachineBasicBlock *TBB,
                                    MachineBasicBlock *FBB,
                                    MachineBasicBlock *CurBB,
                                    MachineBasicBlock *SwitchBB,
                                    BranchProbability TProb,
                                    BranchProbability FProb, bool InvertCond);
  bool ShouldEmitAsBranches(const std::vector<SwitchCG::CaseBlock> &Cases);
  bool isExportableFromCurrentBlock(const Value *V, const BasicBlock *FromBB);
  void CopyToExportRegsIfNeeded(const Value *V);
  void ExportFromCurrentBlock(const Value *V);
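
  /// Lower the call site \p CB to \p Callee, possibly as a (must-)tail call.
  /// When the call site is an invoke, \p EHPadBB is its unwind destination.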
  void LowerCallTo(const CallBase &CB, SDValue Callee, bool IsTailCall,
                   bool IsMustTailCall, const BasicBlock *EHPadBB = nullptr);

  // Lower range metadata from 0 to N to assert zext to an integer of nearest
  // floor power of two.
  SDValue lowerRangeToAssertZExt(SelectionDAG &DAG, const Instruction &I,
                                 SDValue Op);

  void populateCallLoweringInfo(TargetLowering::CallLoweringInfo &CLI,
                                const CallBase *Call, unsigned ArgIdx,
                                unsigned NumArgs, SDValue Callee,
                                Type *ReturnTy, bool IsPatchPoint);

  std::pair<SDValue, SDValue>
  lowerInvokable(TargetLowering::CallLoweringInfo &CLI,
                 const BasicBlock *EHPadBB = nullptr);

  /// When an MBB was split during scheduling, update the
  /// references that need to refer to the last resulting block.
  void UpdateSplitBlock(MachineBasicBlock *First, MachineBasicBlock *Last);

  /// Describes a gc.statepoint or a gc.statepoint-like thing for the purposes
  /// of lowering into a STATEPOINT node.
  struct StatepointLoweringInfo {
    /// Bases[i] is the base pointer for Ptrs[i]. Together they denote the set
    /// of gc pointers this STATEPOINT has to relocate.
    SmallVector<const Value *, 16> Bases;
    SmallVector<const Value *, 16> Ptrs;

    /// The set of gc.relocate calls associated with this gc.statepoint.
    SmallVector<const GCRelocateInst *, 16> GCRelocates;

    /// The full list of gc arguments to the gc.statepoint being lowered.
    ArrayRef<const Use> GCArgs;

    /// The gc.statepoint instruction.
    const Instruction *StatepointInstr = nullptr;

    /// The list of gc transition arguments present in the gc.statepoint being
    /// lowered.
    ArrayRef<const Use> GCTransitionArgs;

    /// The ID that the resulting STATEPOINT instruction has to report.
    uint64_t ID = -1;

    /// Information regarding the underlying call instruction.
    TargetLowering::CallLoweringInfo CLI;

    /// The deoptimization state associated with this gc.statepoint call, if
    /// any.
    ArrayRef<const Use> DeoptState;

    /// Flags associated with the meta arguments being lowered.
    uint64_t StatepointFlags = -1;

    /// The number of patchable bytes the call needs to get lowered into.
    unsigned NumPatchBytes = -1;

    /// The exception handling unwind destination, in case this represents an
    /// invoke of gc.statepoint.
    const BasicBlock *EHPadBB = nullptr;

    explicit StatepointLoweringInfo(SelectionDAG &DAG) : CLI(DAG) {}
  };

  /// Lower \p SLI into a STATEPOINT instruction.
  SDValue LowerAsSTATEPOINT(StatepointLoweringInfo &SI);

  // This function is responsible for the whole statepoint lowering process.
  // It uniformly handles invoke and call statepoints.
  void LowerStatepoint(const GCStatepointInst &I,
                       const BasicBlock *EHPadBB = nullptr);

  void LowerCallSiteWithDeoptBundle(const CallBase *Call, SDValue Callee,
                                    const BasicBlock *EHPadBB);

  void LowerDeoptimizeCall(const CallInst *CI);
  void LowerDeoptimizingReturn();

  void LowerCallSiteWithDeoptBundleImpl(const CallBase *Call, SDValue Callee,
                                        const BasicBlock *EHPadBB,
                                        bool VarArgDisallowed,
                                        bool ForceVoidReturnTy);

  /// Returns the type of FrameIndex and TargetFrameIndex nodes.
  MVT getFrameIndexTy() {
    return DAG.getTargetLoweringInfo().getFrameIndexTy(DAG.getDataLayout());
  }

private:
  // Terminator instructions.
  void visitRet(const ReturnInst &I);
  void visitBr(const BranchInst &I);
  void visitSwitch(const SwitchInst &I);
  void visitIndirectBr(const IndirectBrInst &I);
  void visitUnreachable(const UnreachableInst &I);
  void visitCleanupRet(const CleanupReturnInst &I);
  void visitCatchSwitch(const CatchSwitchInst &I);
  void visitCatchRet(const CatchReturnInst &I);
  void visitCatchPad(const CatchPadInst &I);
  void visitCleanupPad(const CleanupPadInst &CPI);

  BranchProbability getEdgeProbability(const MachineBasicBlock *Src,
                                       const MachineBasicBlock *Dst) const;
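
  /// Add \p Dst as a successor of \p Src in the machine CFG; when \p Prob is
  /// unknown, the edge probability is taken from branch probability info if
  /// available.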
  void addSuccessorWithProb(
      MachineBasicBlock *Src, MachineBasicBlock *Dst,
      BranchProbability Prob = BranchProbability::getUnknown());

public:
  void visitSwitchCase(SwitchCG::CaseBlock &CB, MachineBasicBlock *SwitchBB);
  void visitSPDescriptorParent(StackProtectorDescriptor &SPD,
                               MachineBasicBlock *ParentBB);
  void visitSPDescriptorFailure(StackProtectorDescriptor &SPD);
  void visitBitTestHeader(SwitchCG::BitTestBlock &B,
                          MachineBasicBlock *SwitchBB);
  void visitBitTestCase(SwitchCG::BitTestBlock &BB, MachineBasicBlock *NextMBB,
                        BranchProbability BranchProbToNext, unsigned Reg,
                        SwitchCG::BitTestCase &B, MachineBasicBlock *SwitchBB);
  void visitJumpTable(SwitchCG::JumpTable &JT);
  void visitJumpTableHeader(SwitchCG::JumpTable &JT,
                            SwitchCG::JumpTableHeader &JTH,
                            MachineBasicBlock *SwitchBB);

private:
  // These all get lowered before this pass.
  void visitInvoke(const InvokeInst &I);
  void visitCallBr(const CallBrInst &I);
  void visitResume(const ResumeInst &I);

  void visitUnary(const User &I, unsigned Opcode);
  void visitFNeg(const User &I) { visitUnary(I, ISD::FNEG); }

  void visitBinary(const User &I, unsigned Opcode);
  void visitShift(const User &I, unsigned Opcode);
  void visitAdd(const User &I) { visitBinary(I, ISD::ADD); }
  void visitFAdd(const User &I) { visitBinary(I, ISD::FADD); }
  void visitSub(const User &I) { visitBinary(I, ISD::SUB); }
  void visitFSub(const User &I) { visitBinary(I, ISD::FSUB); }
  void visitMul(const User &I) { visitBinary(I, ISD::MUL); }
  void visitFMul(const User &I) { visitBinary(I, ISD::FMUL); }
  void visitURem(const User &I) { visitBinary(I, ISD::UREM); }
  void visitSRem(const User &I) { visitBinary(I, ISD::SREM); }
  void visitFRem(const User &I) { visitBinary(I, ISD::FREM); }
  void visitUDiv(const User &I) { visitBinary(I, ISD::UDIV); }
  void visitSDiv(const User &I);
  void visitFDiv(const User &I) { visitBinary(I, ISD::FDIV); }
  void visitAnd(const User &I) { visitBinary(I, ISD::AND); }
  void visitOr(const User &I) { visitBinary(I, ISD::OR); }
  void visitXor(const User &I) { visitBinary(I, ISD::XOR); }
  void visitShl(const User &I) { visitShift(I, ISD::SHL); }
  void visitLShr(const User &I) { visitShift(I, ISD::SRL); }
  void visitAShr(const User &I) { visitShift(I, ISD::SRA); }
  void visitICmp(const User &I);
  void visitFCmp(const User &I);
  // Visit the conversion instructions.
  void visitTrunc(const User &I);
  void visitZExt(const User &I);
  void visitSExt(const User &I);
  void visitFPTrunc(const User &I);
  void visitFPExt(const User &I);
  void visitFPToUI(const User &I);
  void visitFPToSI(const User &I);
  void visitUIToFP(const User &I);
  void visitSIToFP(const User &I);
  void visitPtrToInt(const User &I);
  void visitIntToPtr(const User &I);
  void visitBitCast(const User &I);
  void visitAddrSpaceCast(const User &I);

  void visitExtractElement(const User &I);
  void visitInsertElement(const User &I);
  void visitShuffleVector(const User &I);

  void visitExtractValue(const User &I);
  void visitInsertValue(const User &I);
  void visitLandingPad(const LandingPadInst &LP);

  void visitGetElementPtr(const User &I);
  void visitSelect(const User &I);

  void visitAlloca(const AllocaInst &I);
  void visitLoad(const LoadInst &I);
  void visitStore(const StoreInst &I);
  void visitMaskedLoad(const CallInst &I, bool IsExpanding = false);
  void visitMaskedStore(const CallInst &I, bool IsCompressing = false);
  void visitMaskedGather(const CallInst &I);
  void visitMaskedScatter(const CallInst &I);
  void visitAtomicCmpXchg(const AtomicCmpXchgInst &I);
  void visitAtomicRMW(const AtomicRMWInst &I);
  void visitFence(const FenceInst &I);
  void visitPHI(const PHINode &I);
  void visitCall(const CallInst &I);
  bool visitMemCmpBCmpCall(const CallInst &I);
  bool visitMemPCpyCall(const CallInst &I);
  bool visitMemChrCall(const CallInst &I);
  bool visitStrCpyCall(const CallInst &I, bool isStpcpy);
  bool visitStrCmpCall(const CallInst &I);
  bool visitStrLenCall(const CallInst &I);
  bool visitStrNLenCall(const CallInst &I);
  bool visitUnaryFloatCall(const CallInst &I, unsigned Opcode);
  bool visitBinaryFloatCall(const CallInst &I, unsigned Opcode);
  void visitAtomicLoad(const LoadInst &I);
  void visitAtomicStore(const StoreInst &I);
  void visitLoadFromSwiftError(const LoadInst &I);
  void visitStoreToSwiftError(const StoreInst &I);
  void visitFreeze(const FreezeInst &I);

  void visitInlineAsm(const CallBase &Call,
                      const BasicBlock *EHPadBB = nullptr);
  void visitIntrinsicCall(const CallInst &I, unsigned Intrinsic);
  void visitTargetIntrinsic(const CallInst &I, unsigned Intrinsic);
  void visitConstrainedFPIntrinsic(const ConstrainedFPIntrinsic &FPI);
  void visitVPLoadGather(const VPIntrinsic &VPIntrin, EVT VT,
                         SmallVector<SDValue, 7> &OpValues, bool IsGather);
  void visitVPStoreScatter(const VPIntrinsic &VPIntrin,
                           SmallVector<SDValue, 7> &OpValues, bool IsScatter);
  void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);

  void visitVAStart(const CallInst &I);
  void visitVAArg(const VAArgInst &I);
  void visitVAEnd(const CallInst &I);
  void visitVACopy(const CallInst &I);
  void visitStackmap(const CallInst &I);
  void visitPatchpoint(const CallBase &CB, const BasicBlock *EHPadBB = nullptr);

  // These two are implemented in StatepointLowering.cpp
  void visitGCRelocate(const GCRelocateInst &Relocate);
  void visitGCResult(const GCResultInst &I);

  void visitVectorReduce(const CallInst &I, unsigned Intrinsic);
  void visitVectorReverse(const CallInst &I);
  void visitVectorSplice(const CallInst &I);
  void visitStepVector(const CallInst &I);

  void visitUserOp1(const Instruction &I) {
    llvm_unreachable("UserOp1 should not exist at instruction selection time!");
  }
  void visitUserOp2(const Instruction &I) {
    llvm_unreachable("UserOp2 should not exist at instruction selection time!");
  }

  void processIntegerCallValue(const Instruction &I,
                               SDValue Value, bool IsSigned);
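
  /// Emit copies into virtual registers for the values used by PHI nodes in
  /// the successors of \p LLVMBB, so the PHIs can be updated once their
  /// predecessor blocks have been emitted.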
  void HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB);

  void emitInlineAsmError(const CallBase &Call, const Twine &Message);

  /// If V is a function argument then create the corresponding DBG_VALUE
  /// machine instruction for it now. At the end of instruction selection,
  /// they will be inserted into the entry BB.
  bool EmitFuncArgumentDbgValue(const Value *V, DILocalVariable *Variable,
                                DIExpression *Expr, DILocation *DL,
                                bool IsDbgDeclare, const SDValue &N);

  /// Return the next block after MBB, or nullptr if there is none.
  MachineBasicBlock *NextBlock(MachineBasicBlock *MBB);

  /// Update the DAG and DAG builder with the relevant information after
  /// a new root node has been created which could be a tail call.
  void updateDAGForMaybeTailCall(SDValue MaybeTC);

  /// Return the appropriate SDDbgValue based on N.
  SDDbgValue *getDbgValue(SDValue N, DILocalVariable *Variable,
                          DIExpression *Expr, const DebugLoc &dl,
                          unsigned DbgSDNodeOrder);

  /// Lowers a CallInst to an external symbol.
  void lowerCallToExternalSymbol(const CallInst &I, const char *FunctionName);

  SDValue lowerStartEH(SDValue Chain, const BasicBlock *EHPadBB,
                       MCSymbol *&BeginLabel);
  SDValue lowerEndEH(SDValue Chain, const InvokeInst *II,
                     const BasicBlock *EHPadBB, MCSymbol *BeginLabel);
};

/// RegsForValue - This struct represents the registers (physical or virtual)
/// that a particular set of values is assigned, and the type information about
/// the value. The most common situation is to represent one value at a time,
/// but struct or array values are handled element-wise as multiple values. The
/// splitting of aggregates is performed recursively, so that we never have
/// aggregate-typed registers. The values at this point do not necessarily have
/// legal types, so each value may require one or more registers of some legal
/// register type.
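///
/// For example, on a target whose only legal integer register type is i64, an
/// i128 value could be described by ValueVTs = {i128}, RegVTs = {i64}, and
/// RegCount = {2}, with two register entries in Regs.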
struct RegsForValue {
  /// The value types of the values, which may not be legal, and
  /// may need to be promoted or synthesized from one or more registers.
  SmallVector<EVT, 4> ValueVTs;

  /// The value types of the registers. This is the same size as ValueVTs and it
  /// records, for each value, what the type of the assigned register or
  /// registers are. (Individual values are never synthesized from more than one
  /// type of register.)
  ///
  /// With virtual registers, the contents of RegVTs is redundant with TLI's
  /// getRegisterType member function, however with physical registers
  /// it is necessary to have a separate record of the types.
  SmallVector<MVT, 4> RegVTs;

  /// This list holds the registers assigned to the values.
  /// Each legal or promoted value requires one register, and each
  /// expanded value requires multiple registers.
  SmallVector<unsigned, 4> Regs;

  /// This list holds the number of registers for each value.
  SmallVector<unsigned, 4> RegCount;

  /// Records if this value needs to be treated in an ABI-dependent manner,
  /// different from normal type legalization.
  Optional<CallingConv::ID> CallConv;

  RegsForValue() = default;
  RegsForValue(const SmallVector<unsigned, 4> &regs, MVT regvt, EVT valuevt,
               Optional<CallingConv::ID> CC = None);
  RegsForValue(LLVMContext &Context, const TargetLowering &TLI,
               const DataLayout &DL, unsigned Reg, Type *Ty,
               Optional<CallingConv::ID> CC);

  bool isABIMangled() const {
    return CallConv.hasValue();
  }

  /// Add the specified values to this one.
  void append(const RegsForValue &RHS) {
    ValueVTs.append(RHS.ValueVTs.begin(), RHS.ValueVTs.end());
    RegVTs.append(RHS.RegVTs.begin(), RHS.RegVTs.end());
    Regs.append(RHS.Regs.begin(), RHS.Regs.end());
    RegCount.push_back(RHS.Regs.size());
  }

  /// Emit a series of CopyFromReg nodes that copies from this value and returns
  /// the result as a ValueVTs value. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is NULL, no
  /// flag is used.
  SDValue getCopyFromRegs(SelectionDAG &DAG, FunctionLoweringInfo &FuncInfo,
                          const SDLoc &dl, SDValue &Chain, SDValue *Flag,
                          const Value *V = nullptr) const;

  /// Emit a series of CopyToReg nodes that copies the specified value into the
  /// registers specified by this object. This uses Chain/Flag as the input and
  /// updates them for the output Chain/Flag. If the Flag pointer is nullptr, no
  /// flag is used. If V is not nullptr, then it is used in printing better
  /// diagnostic messages on error.
  void getCopyToRegs(SDValue Val, SelectionDAG &DAG, const SDLoc &dl,
                     SDValue &Chain, SDValue *Flag, const Value *V = nullptr,
                     ISD::NodeType PreferredExtendType = ISD::ANY_EXTEND) const;

  /// Add this value to the specified inlineasm node operand list. This adds the
  /// code marker, matching input operand index (if applicable), and includes
  /// the number of values added into it.
  void AddInlineAsmOperands(unsigned Code, bool HasMatching,
                            unsigned MatchingIdx, const SDLoc &dl,
                            SelectionDAG &DAG, std::vector<SDValue> &Ops) const;

  /// Check if the total RegCount is greater than one.
  bool occupiesMultipleRegs() const {
    return std::accumulate(RegCount.begin(), RegCount.end(), 0) > 1;
  }

  /// Return a list of registers and their sizes.
  SmallVector<std::pair<unsigned, TypeSize>, 4> getRegsAndSizes() const;
};

} // end namespace llvm

#endif // LLVM_LIB_CODEGEN_SELECTIONDAG_SELECTIONDAGBUILDER_H