//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;

/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      if (Indices && *Indices == unsigned(EI - EB))
        return ComputeLinearIndex(*EI, Indices+1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(*EI, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset when jumping one element of the array
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the index to the requested
      // elt and recurse inside the element with the end of the indices list
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices+1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
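
// Illustrative example (not from the original source): for a type such as
//   { i32, { i64, float }, i16 }
// the scalar members linearize left-to-right as i32 -> 0, i64 -> 1,
// float -> 2, i16 -> 3, so the extractvalue-style index list {1, 1} (the
// float member) yields:
//   unsigned Idx[] = {1, 1};
//   ComputeLinearIndex(Ty, Idx, Idx + 2, 0); // == 2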

/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    const StructLayout *SL = DL.getStructLayout(STy);
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI)
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, Offsets,
                      StartingOffset + SL->getElementOffset(EI - EB));
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}
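
// Illustrative example (not from the original source): assuming a target
// where i32 lowers to MVT::i32 and float to MVT::f32, decomposing
//   { i32, [2 x float] }
// produces ValueVTs = {i32, f32, f32} and, under the usual 4-byte layout,
// Offsets = {0, 4, 8}:
//   SmallVector<EVT, 4> VTs;
//   SmallVector<uint64_t, 4> Offs;
//   ComputeValueVTs(TLI, DL, STy, VTs, &Offs);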

/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}

/// hasInlineAsmMemConstraint - Return true if the inline asm instruction being
/// processed uses a memory 'm' constraint.
bool
llvm::hasInlineAsmMemConstraint(InlineAsm::ConstraintInfoVector &CInfos,
                                const TargetLowering &TLI) {
  for (unsigned i = 0, e = CInfos.size(); i != e; ++i) {
    InlineAsm::ConstraintInfo &CI = CInfos[i];
    for (unsigned j = 0, ee = CI.Codes.size(); j != ee; ++j) {
      TargetLowering::ConstraintType CType = TLI.getConstraintType(CI.Codes[j]);
      if (CType == TargetLowering::C_Memory)
        return true;
    }

    // Indirect operand accesses access memory.
    if (CI.isIndirect)
      return true;
  }

  return false;
}
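
// Illustrative example (not from the original source): this returns true for
// inline asm such as
//   call void asm sideeffect "movl $$0, $0", "=*m"(i32* %slot)
// where the parsed constraint code "m" classifies as C_Memory; an indirect
// operand (the '*' modifier) counts as a memory access as well.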

/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}

ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}

/// getICmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR integer condition code.
///
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}

static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase& TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}

/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V1; if V1 is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min(DataBits, I->getType()->getPrimitiveSizeInBits());
      NoopInput = Op;
    } else if (auto CS = ImmutableCallSite(I)) {
      const Value *ReturnedOp = CS.getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = IVI->getAggregateOperand();
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }
    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
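
// Illustrative example (not from the original source): in IR such as
//   %call = tail call i8* @f()
//   %cast = bitcast i8* %call to i32*
// getNoopInput on %cast walks through the no-op bitcast and returns %call;
// a free truncate would likewise be looked through, with DataBits reduced to
// the truncated width.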

/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // a Noop.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
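
// Illustrative example (not from the original source): for
//   %call = tail call i64 @f()
//   %trunc = trunc i64 %call to i32
//   ret i32 %trunc
// the ret only needs the low 32 bits of %call, so the slot merely discards
// data and this returns true, provided AllowDifferingSizes is set and the
// target's allowTruncateForTailCall hook accepts the i64 -> i32 truncate.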

/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(CompositeType *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}

/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate
static bool advanceToNextLeafType(SmallVectorImpl<CompositeType *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType = SubTypes.back()->getTypeAtIndex(Path.back());
  while (DeeperType->isAggregateType()) {
    CompositeType *CT = cast<CompositeType>(DeeperType);
    if (!indexReallyValid(CT, 0))
      return true;

    SubTypes.push_back(CT);
    Path.push_back(0);

    DeeperType = CT->getTypeAtIndex(0U);
  }

  return true;
}

/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next,
                          SmallVectorImpl<CompositeType *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Next->isAggregateType() &&
         indexReallyValid(cast<CompositeType>(Next), 0)) {
    SubTypes.push_back(cast<CompositeType>(Next));
    Path.push_back(0);
    Next = cast<CompositeType>(Next)->getTypeAtIndex(0U);
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}

/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<CompositeType *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (SubTypes.back()->getTypeAtIndex(Path.back())->isAggregateType());

  return true;
}
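
// Illustrative usage sketch (not from the original source): for an aggregate
// type like the {[0 x i64], {{}, i32, {}}, i32} example above, the pair of
// helpers visits each scalar leaf in left-to-right order, skipping empty
// aggregates:
//   SmallVector<CompositeType *, 4> SubTypes;
//   SmallVector<unsigned, 4> Path;
//   for (bool More = firstRealType(Ty, SubTypes, Path); More;
//        More = nextRealType(SubTypes, Path)) {
//     // SubTypes.back()->getTypeAtIndex(Path.back()) is the current leaf.
//   }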

/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
bool llvm::isInTailCallPosition(ImmutableCallSite CS, const TargetMachine &TM) {
  const Instruction *I = CS.getInstruction();
  const BasicBlock *ExitBB = I->getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret &&
      (!TM.Options.GuaranteedTailCallOpt || !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  if (I->mayHaveSideEffects() || I->mayReadFromMemory() ||
      !isSafeToSpeculativelyExecute(I))
    for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
      if (&*BBI == I)
        break;
      // Debug info intrinsics do not get in the way of tail call optimization.
      if (isa<DbgInfoIntrinsic>(BBI))
        continue;
      // A lifetime end intrinsic should not stop tail call optimization.
      if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
        if (II->getIntrinsicID() == Intrinsic::lifetime_end)
          continue;
      if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
          !isSafeToSpeculativelyExecute(&*BBI))
        return false;
    }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, I, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
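
// Illustrative example (not from the original source): the call below sits in
// tail call position because nothing that needs scheduling lies between it
// and the return:
//   %r = tail call i32 @callee(i32 %x)
//   ret i32 %r
// whereas a store or other side-effecting instruction between the call and
// the ret would disqualify it.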

bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getAttributes(), AttributeList::ReturnIndex);
  AttrBuilder CalleeAttrs(cast<CallInst>(I)->getAttributes(),
                          AttributeList::ReturnIndex);

  // NoAlias and NonNull are completely benign as far as calling convention
  // goes, they shouldn't affect whether the call is a tail call.
  CallerAttrs.removeAttribute(Attribute::NoAlias);
  CalleeAttrs.removeAttribute(Attribute::NoAlias);
  CallerAttrs.removeAttribute(Attribute::NonNull);
  CalleeAttrs.removeAttribute(Attribute::NonNull);

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}

bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsic like llvm.memcpy has no return value, but the expanded
  // libcall may or may not have return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as library call without return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        RetVal == Call->getArgOperand(0))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<CompositeType *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // Nothing's actually returned, it doesn't matter what the callee put there
  // it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType = RetSubTypes.back()->getTypeAtIndex(RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(RetPath.rbegin(), RetPath.rend());
    SmallVector<unsigned, 4> TmpCallPath(CallPath.rbegin(), CallPath.rend());

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
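
// Illustrative example (not from the original source): when llvm.memcpy will
// lower to libc's memcpy (which returns its first argument), IR such as
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* %s, i64 %n, i1 false)
//   ret i8* %d
// is accepted by the intrinsic special case above even though the intrinsic
// itself returns void.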

static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    for (const MachineBasicBlock *Succ : Visiting->successors())
      Worklist.push_back(Succ);
  }
}

DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}