//===-- Analysis.cpp - CodeGen LLVM IR Analysis Utilities -----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines several CodeGen-specific LLVM IR analysis utilities.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/Analysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Utils/GlobalStatus.h"

using namespace llvm;
/// Compute the linearized index of a member in a nested aggregate/struct/array
/// by recursing and accumulating CurIndex as long as there are indices in the
/// index list.
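///
/// For example (illustrative): for the struct type { i32, { float, float }, i8 }
/// and the index list {1, 1}, the result is 2, because the flattened leaf order
/// is i32 (0), float (1), float (2), i8 (3) and the requested member is the
/// second float.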
unsigned llvm::ComputeLinearIndex(Type *Ty,
                                  const unsigned *Indices,
                                  const unsigned *IndicesEnd,
                                  unsigned CurIndex) {
  // Base case: We're done.
  if (Indices && Indices == IndicesEnd)
    return CurIndex;

  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    for (auto I : llvm::enumerate(STy->elements())) {
      Type *ET = I.value();
      if (Indices && *Indices == I.index())
        return ComputeLinearIndex(ET, Indices + 1, IndicesEnd, CurIndex);
      CurIndex = ComputeLinearIndex(ET, nullptr, nullptr, CurIndex);
    }
    assert(!Indices && "Unexpected out of bound");
    return CurIndex;
  }
  // Given an array type, recursively traverse the elements.
  else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    unsigned NumElts = ATy->getNumElements();
    // Compute the linear offset obtained when jumping over one array element.
    unsigned EltLinearOffset = ComputeLinearIndex(EltTy, nullptr, nullptr, 0);
    if (Indices) {
      assert(*Indices < NumElts && "Unexpected out of bound");
      // If the index is inside the array, compute the offset of the requested
      // element and recurse into it with the rest of the index list.
      CurIndex += EltLinearOffset * *Indices;
      return ComputeLinearIndex(EltTy, Indices + 1, IndicesEnd, CurIndex);
    }
    CurIndex += EltLinearOffset * NumElts;
    return CurIndex;
  }
  // We haven't found the type we're looking for, so keep searching.
  return CurIndex + 1;
}
/// ComputeValueVTs - Given an LLVM IR type, compute a sequence of
/// EVTs that represent all the individual underlying
/// non-aggregate types that comprise it.
///
/// If Offsets is non-null, it points to a vector to be filled in
/// with the in-memory offsets of each of the individual values.
///
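/// For example (illustrative; exact offsets depend on the target DataLayout):
/// given { i32, [2 x float] } with 4-byte i32 and float, ValueVTs becomes
/// { i32, f32, f32 } and *Offsets becomes { 0, 4, 8 }.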
void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<EVT> *MemVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (StructType::element_iterator EB = STy->element_begin(),
                                      EI = EB,
                                      EE = STy->element_end();
         EI != EE; ++EI) {
      // Don't compute the element offset if we didn't get a StructLayout above.
      uint64_t EltOffset = SL ? SL->getElementOffset(EI - EB) : 0;
      ComputeValueVTs(TLI, DL, *EI, ValueVTs, MemVTs, Offsets,
                      StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      ComputeValueVTs(TLI, DL, EltTy, ValueVTs, MemVTs, Offsets,
                      StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty->isVoidTy())
    return;
  // Base case: we can get an EVT for this LLVM IR type.
  ValueVTs.push_back(TLI.getValueType(DL, Ty));
  if (MemVTs)
    MemVTs->push_back(TLI.getMemValueType(DL, Ty));
  if (Offsets)
    Offsets->push_back(StartingOffset);
}

void llvm::ComputeValueVTs(const TargetLowering &TLI, const DataLayout &DL,
                           Type *Ty, SmallVectorImpl<EVT> &ValueVTs,
                           SmallVectorImpl<uint64_t> *Offsets,
                           uint64_t StartingOffset) {
  return ComputeValueVTs(TLI, DL, Ty, ValueVTs, /*MemVTs=*/nullptr, Offsets,
                         StartingOffset);
}
void llvm::computeValueLLTs(const DataLayout &DL, Type &Ty,
                            SmallVectorImpl<LLT> &ValueTys,
                            SmallVectorImpl<uint64_t> *Offsets,
                            uint64_t StartingOffset) {
  // Given a struct type, recursively traverse the elements.
  if (StructType *STy = dyn_cast<StructType>(&Ty)) {
    // If the Offsets aren't needed, don't query the struct layout. This allows
    // us to support structs with scalable vectors for operations that don't
    // need offsets.
    const StructLayout *SL = Offsets ? DL.getStructLayout(STy) : nullptr;
    for (unsigned I = 0, E = STy->getNumElements(); I != E; ++I) {
      uint64_t EltOffset = SL ? SL->getElementOffset(I) : 0;
      computeValueLLTs(DL, *STy->getElementType(I), ValueTys, Offsets,
                       StartingOffset + EltOffset);
    }
    return;
  }
  // Given an array type, recursively traverse the elements.
  if (ArrayType *ATy = dyn_cast<ArrayType>(&Ty)) {
    Type *EltTy = ATy->getElementType();
    uint64_t EltSize = DL.getTypeAllocSize(EltTy).getFixedValue();
    for (unsigned i = 0, e = ATy->getNumElements(); i != e; ++i)
      computeValueLLTs(DL, *EltTy, ValueTys, Offsets,
                       StartingOffset + i * EltSize);
    return;
  }
  // Interpret void as zero return values.
  if (Ty.isVoidTy())
    return;
  // Base case: we can get an LLT for this LLVM IR type.
  ValueTys.push_back(getLLTForType(Ty, DL));
  if (Offsets != nullptr)
    Offsets->push_back(StartingOffset * 8);
}
/// ExtractTypeInfo - Returns the type info, possibly bitcast, encoded in V.
GlobalValue *llvm::ExtractTypeInfo(Value *V) {
  V = V->stripPointerCasts();
  GlobalValue *GV = dyn_cast<GlobalValue>(V);
  GlobalVariable *Var = dyn_cast<GlobalVariable>(V);

  if (Var && Var->getName() == "llvm.eh.catch.all.value") {
    assert(Var->hasInitializer() &&
           "The EH catch-all value must have an initializer");
    Value *Init = Var->getInitializer();
    GV = dyn_cast<GlobalValue>(Init);
    if (!GV) V = cast<ConstantPointerNull>(Init);
  }

  assert((GV || isa<ConstantPointerNull>(V)) &&
         "TypeInfo must be a global variable or NULL");
  return GV;
}
/// getFCmpCondCode - Return the ISD condition code corresponding to
/// the given LLVM IR floating-point condition code. This includes
/// consideration of global floating-point math flags.
///
ISD::CondCode llvm::getFCmpCondCode(FCmpInst::Predicate Pred) {
  switch (Pred) {
  case FCmpInst::FCMP_FALSE: return ISD::SETFALSE;
  case FCmpInst::FCMP_OEQ:   return ISD::SETOEQ;
  case FCmpInst::FCMP_OGT:   return ISD::SETOGT;
  case FCmpInst::FCMP_OGE:   return ISD::SETOGE;
  case FCmpInst::FCMP_OLT:   return ISD::SETOLT;
  case FCmpInst::FCMP_OLE:   return ISD::SETOLE;
  case FCmpInst::FCMP_ONE:   return ISD::SETONE;
  case FCmpInst::FCMP_ORD:   return ISD::SETO;
  case FCmpInst::FCMP_UNO:   return ISD::SETUO;
  case FCmpInst::FCMP_UEQ:   return ISD::SETUEQ;
  case FCmpInst::FCMP_UGT:   return ISD::SETUGT;
  case FCmpInst::FCMP_UGE:   return ISD::SETUGE;
  case FCmpInst::FCMP_ULT:   return ISD::SETULT;
  case FCmpInst::FCMP_ULE:   return ISD::SETULE;
  case FCmpInst::FCMP_UNE:   return ISD::SETUNE;
  case FCmpInst::FCMP_TRUE:  return ISD::SETTRUE;
  default: llvm_unreachable("Invalid FCmp predicate opcode!");
  }
}
ISD::CondCode llvm::getFCmpCodeWithoutNaN(ISD::CondCode CC) {
  switch (CC) {
  case ISD::SETOEQ: case ISD::SETUEQ: return ISD::SETEQ;
  case ISD::SETONE: case ISD::SETUNE: return ISD::SETNE;
  case ISD::SETOLT: case ISD::SETULT: return ISD::SETLT;
  case ISD::SETOLE: case ISD::SETULE: return ISD::SETLE;
  case ISD::SETOGT: case ISD::SETUGT: return ISD::SETGT;
  case ISD::SETOGE: case ISD::SETUGE: return ISD::SETGE;
  default: return CC;
  }
}
ISD::CondCode llvm::getICmpCondCode(ICmpInst::Predicate Pred) {
  switch (Pred) {
  case ICmpInst::ICMP_EQ:  return ISD::SETEQ;
  case ICmpInst::ICMP_NE:  return ISD::SETNE;
  case ICmpInst::ICMP_SLE: return ISD::SETLE;
  case ICmpInst::ICMP_ULE: return ISD::SETULE;
  case ICmpInst::ICMP_SGE: return ISD::SETGE;
  case ICmpInst::ICMP_UGE: return ISD::SETUGE;
  case ICmpInst::ICMP_SLT: return ISD::SETLT;
  case ICmpInst::ICMP_ULT: return ISD::SETULT;
  case ICmpInst::ICMP_SGT: return ISD::SETGT;
  case ICmpInst::ICMP_UGT: return ISD::SETUGT;
  default:
    llvm_unreachable("Invalid ICmp predicate opcode!");
  }
}
ICmpInst::Predicate llvm::getICmpCondCode(ISD::CondCode Pred) {
  switch (Pred) {
  case ISD::SETEQ:
    return ICmpInst::ICMP_EQ;
  case ISD::SETNE:
    return ICmpInst::ICMP_NE;
  case ISD::SETLE:
    return ICmpInst::ICMP_SLE;
  case ISD::SETULE:
    return ICmpInst::ICMP_ULE;
  case ISD::SETGE:
    return ICmpInst::ICMP_SGE;
  case ISD::SETUGE:
    return ICmpInst::ICMP_UGE;
  case ISD::SETLT:
    return ICmpInst::ICMP_SLT;
  case ISD::SETULT:
    return ICmpInst::ICMP_ULT;
  case ISD::SETGT:
    return ICmpInst::ICMP_SGT;
  case ISD::SETUGT:
    return ICmpInst::ICMP_UGT;
  default:
    llvm_unreachable("Invalid ISD integer condition code!");
  }
}
static bool isNoopBitcast(Type *T1, Type *T2,
                          const TargetLoweringBase &TLI) {
  return T1 == T2 || (T1->isPointerTy() && T2->isPointerTy()) ||
         (isa<VectorType>(T1) && isa<VectorType>(T2) &&
          TLI.isTypeLegal(EVT::getEVT(T1)) && TLI.isTypeLegal(EVT::getEVT(T2)));
}
/// Look through operations that will be free to find the earliest source of
/// this value.
///
/// @param ValLoc If V has aggregate type, we will be interested in a particular
/// scalar component. This records its address; the reverse of this list gives a
/// sequence of indices appropriate for an extractvalue to locate the important
/// value. This value is updated during the function and on exit will indicate
/// similar information for the Value returned.
///
/// @param DataBits If this function looks through truncate instructions, this
/// will record the smallest size attained.
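///
/// For example (illustrative), given
///   %p = call i8* @f()
///   %q = bitcast i8* %p to i32*
/// getNoopInput(%q, ...) returns %p, since a bitcast between pointer types
/// generates no code and %p is therefore the earliest source of the value.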
static const Value *getNoopInput(const Value *V,
                                 SmallVectorImpl<unsigned> &ValLoc,
                                 unsigned &DataBits,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {
  while (true) {
    // Try to look through V; if V is not an instruction, it can't be looked
    // through.
    const Instruction *I = dyn_cast<Instruction>(V);
    if (!I || I->getNumOperands() == 0) return V;
    const Value *NoopInput = nullptr;

    Value *Op = I->getOperand(0);
    if (isa<BitCastInst>(I)) {
      // Look through truly no-op bitcasts.
      if (isNoopBitcast(Op->getType(), I->getType(), TLI))
        NoopInput = Op;
    } else if (isa<GetElementPtrInst>(I)) {
      // Look through getelementptr.
      if (cast<GetElementPtrInst>(I)->hasAllZeroIndices())
        NoopInput = Op;
    } else if (isa<IntToPtrInst>(I)) {
      // Look through inttoptr.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(Op->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<PtrToIntInst>(I)) {
      // Look through ptrtoint.
      // Make sure this isn't a truncating or extending cast. We could
      // support this eventually, but don't bother for now.
      if (!isa<VectorType>(I->getType()) &&
          DL.getPointerSizeInBits() ==
              cast<IntegerType>(I->getType())->getBitWidth())
        NoopInput = Op;
    } else if (isa<TruncInst>(I) &&
               TLI.allowTruncateForTailCall(Op->getType(), I->getType())) {
      DataBits = std::min((uint64_t)DataBits,
                          I->getType()->getPrimitiveSizeInBits().getFixedSize());
      NoopInput = Op;
    } else if (auto *CB = dyn_cast<CallBase>(I)) {
      const Value *ReturnedOp = CB->getReturnedArgOperand();
      if (ReturnedOp && isNoopBitcast(ReturnedOp->getType(), I->getType(), TLI))
        NoopInput = ReturnedOp;
    } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(V)) {
      // Value may come from either the aggregate or the scalar.
      ArrayRef<unsigned> InsertLoc = IVI->getIndices();
      if (ValLoc.size() >= InsertLoc.size() &&
          std::equal(InsertLoc.begin(), InsertLoc.end(), ValLoc.rbegin())) {
        // The type being inserted is a nested sub-type of the aggregate; we
        // have to remove those initial indices to get the location we're
        // interested in for the operand.
        ValLoc.resize(ValLoc.size() - InsertLoc.size());
        NoopInput = IVI->getInsertedValueOperand();
      } else {
        // The struct we're inserting into has the value we're interested in, no
        // change of address.
        NoopInput = Op;
      }
    } else if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(V)) {
      // The part we're interested in will inevitably be some sub-section of the
      // previous aggregate. Combine the two paths to obtain the true address of
      // our element.
      ArrayRef<unsigned> ExtractLoc = EVI->getIndices();
      ValLoc.append(ExtractLoc.rbegin(), ExtractLoc.rend());
      NoopInput = Op;
    }

    // Terminate if we couldn't find anything to look through.
    if (!NoopInput)
      return V;

    V = NoopInput;
  }
}
/// Return true if this scalar return value only has bits discarded on its path
/// from the "tail call" to the "ret". This includes the obvious noop
/// instructions handled by getNoopInput above as well as free truncations (or
/// extensions prior to the call).
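///
/// For example (illustrative), assuming the target reports the i64-to-i32
/// truncation as free (allowTruncateForTailCall) and the return attributes
/// allow differing sizes, the following still qualifies because the "ret"
/// only needs a subset of the bits the "tail call" provides:
///   %v = tail call i64 @callee()
///   %w = trunc i64 %v to i32
///   ret i32 %w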
static bool slotOnlyDiscardsData(const Value *RetVal, const Value *CallVal,
                                 SmallVectorImpl<unsigned> &RetIndices,
                                 SmallVectorImpl<unsigned> &CallIndices,
                                 bool AllowDifferingSizes,
                                 const TargetLoweringBase &TLI,
                                 const DataLayout &DL) {

  // Trace the sub-value needed by the return value as far back up the graph as
  // possible, in the hope that it will intersect with the value produced by the
  // call. In the simple case with no "returned" attribute, the hope is actually
  // that we end up back at the tail call instruction itself.
  unsigned BitsRequired = UINT_MAX;
  RetVal = getNoopInput(RetVal, RetIndices, BitsRequired, TLI, DL);

  // If this slot in the value returned is undef, it doesn't matter what the
  // call puts there, it'll be fine.
  if (isa<UndefValue>(RetVal))
    return true;

  // Now do a similar search up through the graph to find where the value
  // actually returned by the "tail call" comes from. In the simple case without
  // a "returned" attribute, the search will be blocked immediately and the loop
  // a Noop.
  unsigned BitsProvided = UINT_MAX;
  CallVal = getNoopInput(CallVal, CallIndices, BitsProvided, TLI, DL);

  // There's no hope if we can't actually trace them to (the same part of!) the
  // same value.
  if (CallVal != RetVal || CallIndices != RetIndices)
    return false;

  // However, intervening truncates may have made the call non-tail. Make sure
  // all the bits that are needed by the "ret" have been provided by the "tail
  // call". FIXME: with sufficiently cunning bit-tracking, we could look through
  // extensions too.
  if (BitsProvided < BitsRequired ||
      (!AllowDifferingSizes && BitsProvided != BitsRequired))
    return false;

  return true;
}
/// For an aggregate type, determine whether a given index is within bounds or
/// not.
static bool indexReallyValid(Type *T, unsigned Idx) {
  if (ArrayType *AT = dyn_cast<ArrayType>(T))
    return Idx < AT->getNumElements();

  return Idx < cast<StructType>(T)->getNumElements();
}
/// Move the given iterators to the next leaf type in depth first traversal.
///
/// Performs a depth-first traversal of the type as specified by its arguments,
/// stopping at the next leaf node (which may be a legitimate scalar type or an
/// empty struct or array).
///
/// @param SubTypes List of the partial components making up the type from
/// outermost to innermost non-empty aggregate. The element currently
/// represented is SubTypes.back()->getTypeAtIndex(Path.back() - 1).
///
/// @param Path Set of extractvalue indices leading from the outermost type
/// (SubTypes[0]) to the leaf node currently represented.
///
/// @returns true if a new type was found, false otherwise. Calling this
/// function again on a finished iterator will repeatedly return
/// false. SubTypes.back()->getTypeAtIndex(Path.back()) is either an empty
/// aggregate or a non-aggregate.
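///
/// For example (illustrative), for the type {{i32, i32}, float} with the
/// iterator at the first i32 (SubTypes = [outer, {i32, i32}], Path = [0, 0]),
/// successive calls advance to the second i32 (Path = [0, 1]), then to the
/// float (SubTypes = [outer], Path = [1]), and then return false.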
static bool advanceToNextLeafType(SmallVectorImpl<Type *> &SubTypes,
                                  SmallVectorImpl<unsigned> &Path) {
  // First march back up the tree until we can successfully increment one of the
  // coordinates in Path.
  while (!Path.empty() && !indexReallyValid(SubTypes.back(), Path.back() + 1)) {
    Path.pop_back();
    SubTypes.pop_back();
  }

  // If we reached the top, then the iterator is done.
  if (Path.empty())
    return false;

  // We know there's *some* valid leaf now, so march back down the tree picking
  // out the left-most element at each node.
  ++Path.back();
  Type *DeeperType =
      ExtractValueInst::getIndexedType(SubTypes.back(), Path.back());
  while (DeeperType->isAggregateType()) {
    if (!indexReallyValid(DeeperType, 0))
      return true;

    SubTypes.push_back(DeeperType);
    Path.push_back(0);

    DeeperType = ExtractValueInst::getIndexedType(DeeperType, 0);
  }

  return true;
}
/// Find the first non-empty, scalar-like type in Next and setup the iterator
/// components.
///
/// Assuming Next is an aggregate of some kind, this function will traverse the
/// tree from left to right (i.e. depth-first) looking for the first
/// non-aggregate type which will play a role in function return.
///
/// For example, if Next was {[0 x i64], {{}, i32, {}}, i32} then we would setup
/// Path as [1, 1] and SubTypes as [Next, {{}, i32, {}}] to represent the first
/// i32 in that type.
static bool firstRealType(Type *Next, SmallVectorImpl<Type *> &SubTypes,
                          SmallVectorImpl<unsigned> &Path) {
  // First initialise the iterator components to the first "leaf" node
  // (i.e. node with no valid sub-type at any index, so {} does count as a leaf
  // despite nominally being an aggregate).
  while (Type *FirstInner = ExtractValueInst::getIndexedType(Next, 0)) {
    SubTypes.push_back(Next);
    Path.push_back(0);
    Next = FirstInner;
  }

  // If there's no Path now, Next was originally scalar already (or empty
  // leaf). We're done.
  if (Path.empty())
    return true;

  // Otherwise, use normal iteration to keep looking through the tree until we
  // find a non-aggregate type.
  while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
             ->isAggregateType()) {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;
  }

  return true;
}
/// Set the iterator data-structures to the next non-empty, non-aggregate
/// subtype.
static bool nextRealType(SmallVectorImpl<Type *> &SubTypes,
                         SmallVectorImpl<unsigned> &Path) {
  do {
    if (!advanceToNextLeafType(SubTypes, Path))
      return false;

    assert(!Path.empty() && "found a leaf but didn't set the path?");
  } while (ExtractValueInst::getIndexedType(SubTypes.back(), Path.back())
               ->isAggregateType());

  return true;
}
/// Test if the given instruction is in a position to be optimized
/// with a tail-call. This roughly means that it's in a block with
/// a return and there's nothing that needs to be scheduled
/// between it and the return.
///
/// This function only tests target-independent requirements.
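///
/// For example (illustrative), the call below satisfies the positional
/// requirement, since only the return separates it from the end of the block:
///   define i32 @caller() {
///     %v = tail call i32 @callee()
///     ret i32 %v
///   }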
bool llvm::isInTailCallPosition(const CallBase &Call, const TargetMachine &TM) {
  const BasicBlock *ExitBB = Call.getParent();
  const Instruction *Term = ExitBB->getTerminator();
  const ReturnInst *Ret = dyn_cast<ReturnInst>(Term);

  // The block must end in a return statement or unreachable.
  //
  // FIXME: Decline tailcall if it's not guaranteed and if the block ends in
  // an unreachable, for now. The way tailcall optimization is currently
  // implemented means it will add an epilogue followed by a jump. That is
  // not profitable. Also, if the callee is a special function (e.g.
  // longjmp on x86), it can end up causing miscompilation that has not
  // been fully understood.
  if (!Ret && ((!TM.Options.GuaranteedTailCallOpt &&
                Call.getCallingConv() != CallingConv::Tail &&
                Call.getCallingConv() != CallingConv::SwiftTail) ||
               !isa<UnreachableInst>(Term)))
    return false;

  // If I will have a chain, make sure no other instruction that will have a
  // chain interposes between I and the return.
  // Check for all calls including speculatable functions.
  for (BasicBlock::const_iterator BBI = std::prev(ExitBB->end(), 2);; --BBI) {
    if (&*BBI == &Call)
      break;
    // Debug info intrinsics do not get in the way of tail call optimization.
    // Pseudo probe intrinsics do not block tail call optimization either.
    if (BBI->isDebugOrPseudoInst())
      continue;
    // A lifetime end, assume or noalias.decl intrinsic should not stop tail
    // call optimization.
    if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(BBI))
      if (II->getIntrinsicID() == Intrinsic::lifetime_end ||
          II->getIntrinsicID() == Intrinsic::assume ||
          II->getIntrinsicID() == Intrinsic::experimental_noalias_scope_decl)
        continue;
    if (BBI->mayHaveSideEffects() || BBI->mayReadFromMemory() ||
        !isSafeToSpeculativelyExecute(&*BBI))
      return false;
  }

  const Function *F = ExitBB->getParent();
  return returnTypeIsEligibleForTailCall(
      F, &Call, Ret, *TM.getSubtargetImpl(*F)->getTargetLowering());
}
bool llvm::attributesPermitTailCall(const Function *F, const Instruction *I,
                                    const ReturnInst *Ret,
                                    const TargetLoweringBase &TLI,
                                    bool *AllowDifferingSizes) {
  // ADS may be null, so don't write to it directly.
  bool DummyADS;
  bool &ADS = AllowDifferingSizes ? *AllowDifferingSizes : DummyADS;
  ADS = true;

  AttrBuilder CallerAttrs(F->getContext(), F->getAttributes().getRetAttrs());
  AttrBuilder CalleeAttrs(F->getContext(),
                          cast<CallInst>(I)->getAttributes().getRetAttrs());

  // The following attributes are completely benign as far as calling convention
  // goes; they shouldn't affect whether the call is a tail call.
  for (const auto &Attr : {Attribute::Alignment, Attribute::Dereferenceable,
                           Attribute::DereferenceableOrNull, Attribute::NoAlias,
                           Attribute::NonNull}) {
    CallerAttrs.removeAttribute(Attr);
    CalleeAttrs.removeAttribute(Attr);
  }

  if (CallerAttrs.contains(Attribute::ZExt)) {
    if (!CalleeAttrs.contains(Attribute::ZExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::ZExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  } else if (CallerAttrs.contains(Attribute::SExt)) {
    if (!CalleeAttrs.contains(Attribute::SExt))
      return false;

    ADS = false;
    CallerAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::SExt);
  }

  // Drop sext and zext return attributes if the result is not used.
  // This enables tail calls for code like:
  //
  // define void @caller() {
  // entry:
  //   %unused_result = tail call zeroext i1 @callee()
  //   br label %retlabel
  // retlabel:
  //   ret void
  // }
  if (I->use_empty()) {
    CalleeAttrs.removeAttribute(Attribute::SExt);
    CalleeAttrs.removeAttribute(Attribute::ZExt);
  }

  // If they're still different, there's some facet we don't understand
  // (currently only "inreg", but in future who knows). It may be OK but the
  // only safe option is to reject the tail call.
  return CallerAttrs == CalleeAttrs;
}
/// Check whether B is a bitcast of a pointer type to another pointer type,
/// which is equal to A.
static bool isPointerBitcastEqualTo(const Value *A, const Value *B) {
  assert(A && B && "Expected non-null inputs!");

  auto *BitCastIn = dyn_cast<BitCastInst>(B);

  if (!BitCastIn)
    return false;

  if (!A->getType()->isPointerTy() || !B->getType()->isPointerTy())
    return false;

  return A == BitCastIn->getOperand(0);
}
bool llvm::returnTypeIsEligibleForTailCall(const Function *F,
                                           const Instruction *I,
                                           const ReturnInst *Ret,
                                           const TargetLoweringBase &TLI) {
  // If the block ends with a void return or unreachable, it doesn't matter
  // what the call's return type is.
  if (!Ret || Ret->getNumOperands() == 0) return true;

  // If the return value is undef, it doesn't matter what the call's
  // return type is.
  if (isa<UndefValue>(Ret->getOperand(0))) return true;

  // Make sure the attributes attached to each return are compatible.
  bool AllowDifferingSizes;
  if (!attributesPermitTailCall(F, I, Ret, TLI, &AllowDifferingSizes))
    return false;

  const Value *RetVal = Ret->getOperand(0), *CallVal = I;
  // Intrinsics like llvm.memcpy have no return value, but the expanded
  // libcall may or may not have a return value. On most platforms, it
  // will be expanded as memcpy in libc, which returns the first
  // argument. On other platforms like arm-none-eabi, memcpy may be
  // expanded as a library call without a return value, like __aeabi_memcpy.
  const CallInst *Call = cast<CallInst>(I);
  if (Function *F = Call->getCalledFunction()) {
    Intrinsic::ID IID = F->getIntrinsicID();
    if (((IID == Intrinsic::memcpy &&
          TLI.getLibcallName(RTLIB::MEMCPY) == StringRef("memcpy")) ||
         (IID == Intrinsic::memmove &&
          TLI.getLibcallName(RTLIB::MEMMOVE) == StringRef("memmove")) ||
         (IID == Intrinsic::memset &&
          TLI.getLibcallName(RTLIB::MEMSET) == StringRef("memset"))) &&
        (RetVal == Call->getArgOperand(0) ||
         isPointerBitcastEqualTo(RetVal, Call->getArgOperand(0))))
      return true;
  }

  SmallVector<unsigned, 4> RetPath, CallPath;
  SmallVector<Type *, 4> RetSubTypes, CallSubTypes;

  bool RetEmpty = !firstRealType(RetVal->getType(), RetSubTypes, RetPath);
  bool CallEmpty = !firstRealType(CallVal->getType(), CallSubTypes, CallPath);

  // If nothing is actually returned, it doesn't matter what the callee put
  // there; it's a valid tail call.
  if (RetEmpty)
    return true;

  // Iterate pairwise through each of the value types making up the tail call
  // and the corresponding return. For each one we want to know whether it's
  // essentially going directly from the tail call to the ret, via operations
  // that end up not generating any code.
  //
  // We allow a certain amount of covariance here. For example it's permitted
  // for the tail call to define more bits than the ret actually cares about
  // (e.g. via a truncate).
  do {
    if (CallEmpty) {
      // We've exhausted the values produced by the tail call instruction, the
      // rest are essentially undef. The type doesn't really matter, but we need
      // *something*.
      Type *SlotType =
          ExtractValueInst::getIndexedType(RetSubTypes.back(), RetPath.back());
      CallVal = UndefValue::get(SlotType);
    }

    // The manipulations performed when we're looking through an insertvalue or
    // an extractvalue would happen at the front of the RetPath list, so since
    // we have to copy it anyway it's more efficient to create a reversed copy.
    SmallVector<unsigned, 4> TmpRetPath(llvm::reverse(RetPath));
    SmallVector<unsigned, 4> TmpCallPath(llvm::reverse(CallPath));

    // Finally, we can check whether the value produced by the tail call at this
    // index is compatible with the value we return.
    if (!slotOnlyDiscardsData(RetVal, CallVal, TmpRetPath, TmpCallPath,
                              AllowDifferingSizes, TLI,
                              F->getParent()->getDataLayout()))
      return false;

    CallEmpty = !nextRealType(CallSubTypes, CallPath);
  } while (nextRealType(RetSubTypes, RetPath));

  return true;
}
static void collectEHScopeMembers(
    DenseMap<const MachineBasicBlock *, int> &EHScopeMembership, int EHScope,
    const MachineBasicBlock *MBB) {
  SmallVector<const MachineBasicBlock *, 16> Worklist = {MBB};
  while (!Worklist.empty()) {
    const MachineBasicBlock *Visiting = Worklist.pop_back_val();
    // Don't follow blocks which start new scopes.
    if (Visiting->isEHPad() && Visiting != MBB)
      continue;

    // Add this MBB to our scope.
    auto P = EHScopeMembership.insert(std::make_pair(Visiting, EHScope));

    // Don't revisit blocks.
    if (!P.second) {
      assert(P.first->second == EHScope && "MBB is part of two scopes!");
      continue;
    }

    // Returns are boundaries where scope transfer can occur, don't follow
    // successors.
    if (Visiting->isEHScopeReturnBlock())
      continue;

    append_range(Worklist, Visiting->successors());
  }
}
DenseMap<const MachineBasicBlock *, int>
llvm::getEHScopeMembership(const MachineFunction &MF) {
  DenseMap<const MachineBasicBlock *, int> EHScopeMembership;

  // We don't have anything to do if there aren't any EH pads.
  if (!MF.hasEHScopes())
    return EHScopeMembership;

  int EntryBBNumber = MF.front().getNumber();
  bool IsSEH = isAsynchronousEHPersonality(
      classifyEHPersonality(MF.getFunction().getPersonalityFn()));

  const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
  SmallVector<const MachineBasicBlock *, 16> EHScopeBlocks;
  SmallVector<const MachineBasicBlock *, 16> UnreachableBlocks;
  SmallVector<const MachineBasicBlock *, 16> SEHCatchPads;
  SmallVector<std::pair<const MachineBasicBlock *, int>, 16> CatchRetSuccessors;
  for (const MachineBasicBlock &MBB : MF) {
    if (MBB.isEHScopeEntry()) {
      EHScopeBlocks.push_back(&MBB);
    } else if (IsSEH && MBB.isEHPad()) {
      SEHCatchPads.push_back(&MBB);
    } else if (MBB.pred_empty()) {
      UnreachableBlocks.push_back(&MBB);
    }

    MachineBasicBlock::const_iterator MBBI = MBB.getFirstTerminator();

    // CatchPads are not scopes for SEH so do not consider CatchRet to
    // transfer control to another scope.
    if (MBBI == MBB.end() || MBBI->getOpcode() != TII->getCatchReturnOpcode())
      continue;

    // FIXME: SEH CatchPads are not necessarily in the parent function:
    // they could be inside a finally block.
    const MachineBasicBlock *Successor = MBBI->getOperand(0).getMBB();
    const MachineBasicBlock *SuccessorColor = MBBI->getOperand(1).getMBB();
    CatchRetSuccessors.push_back(
        {Successor, IsSEH ? EntryBBNumber : SuccessorColor->getNumber()});
  }

  // We don't have anything to do if there aren't any EH pads.
  if (EHScopeBlocks.empty())
    return EHScopeMembership;

  // Identify all the basic blocks reachable from the function entry.
  collectEHScopeMembers(EHScopeMembership, EntryBBNumber, &MF.front());
  // All blocks not part of a scope are in the parent function.
  for (const MachineBasicBlock *MBB : UnreachableBlocks)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Next, identify all the blocks inside the scopes.
  for (const MachineBasicBlock *MBB : EHScopeBlocks)
    collectEHScopeMembers(EHScopeMembership, MBB->getNumber(), MBB);
  // SEH CatchPads aren't really scopes, handle them separately.
  for (const MachineBasicBlock *MBB : SEHCatchPads)
    collectEHScopeMembers(EHScopeMembership, EntryBBNumber, MBB);
  // Finally, identify all the targets of a catchret.
  for (std::pair<const MachineBasicBlock *, int> CatchRetPair :
       CatchRetSuccessors)
    collectEHScopeMembers(EHScopeMembership, CatchRetPair.second,
                          CatchRetPair.first);
  return EHScopeMembership;
}