//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetMachine.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
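///
/// For example, given the type {i32, {float, double}, i8}, the scalar leaves
/// are linearized as 0 (i32), 1 (float), 2 (double) and 3 (i8), so the index
/// list {1, 1} (the double nested inside the inner struct) maps to linear
/// index 2.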
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping over one element of the array.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index of the requested
      // element and recurse into it with the rest of the indices list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
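///
/// For example, under a typical DataLayout the type {i32, [2 x float]}
/// produces ValueVTs = {i32, f32, f32} and, when Offsets is supplied,
/// byte offsets {0, 4, 8}. (Illustrative values; the exact offsets depend
/// on the target's DataLayout.)
///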
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(), EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}

void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}

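// Note that computeValueLLTs, unlike ComputeValueVTs above, reports each
// offset in bits rather than bytes: StartingOffset is accumulated in bytes
// and converted on the final push (hence the "* 8").
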
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

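/// Collapse each ordered/unordered pair of floating-point condition codes to
/// the single code that ignores NaN behaviour (e.g. both SETOLT and SETULT
/// become SETLT); codes without such a pairing are returned unchanged.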
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

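/// Convert an ISD integer condition code back to the corresponding IR
/// predicate. Note that the signedness-agnostic ISD codes (SETLT, SETLE,
/// SETGT, SETGE) map to the signed IR predicates.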
ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}

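/// Return true if a bitcast from T1 to T2 would be free: identical types,
/// pointer-to-pointer casts, and casts between legal vector types are all
/// treated as no-ops here.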
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
                         I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // a Noop.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate type.
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}

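/// Test whether the caller's and callee's return-value attributes are
/// compatible with tail-calling. When a shared zext or sext attribute pins
/// the returned value to an exact width, *AllowDifferingSizes (if non-null)
/// is set to false.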
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // Following attributes are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull, Attribute::NoUndef}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}

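/// Check that the value this function would return flows from the potential
/// tail call to the "ret" through operations that generate no code (free
/// bitcasts, all-zero GEPs, permissible truncations, and the like).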
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as library call without return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, it doesn't matter what the callee put there
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // one.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}

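/// Mark MBB and every block reachable from it as members of EHScope,
/// stopping at blocks that open another scope (EH pads other than MBB
/// itself) and at scope-return blocks.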
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}

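// A scope is identified by the number of its entry basic block; blocks owned
// by the parent function itself are keyed by the function entry block's
// number.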
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}