//===- FunctionAttrs.cpp - Pass which marks functions attributes ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file implements interprocedural passes which walk the
/// call-graph deducing and/or propagating function attributes.
//
//===----------------------------------------------------------------------===//
15 #include "llvm/Transforms/IPO/FunctionAttrs.h"
16 #include "llvm/ADT/SCCIterator.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SetVector.h"
19 #include "llvm/ADT/SmallPtrSet.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/AssumptionCache.h"
24 #include "llvm/Analysis/BasicAliasAnalysis.h"
25 #include "llvm/Analysis/CGSCCPassManager.h"
26 #include "llvm/Analysis/CallGraph.h"
27 #include "llvm/Analysis/CallGraphSCCPass.h"
28 #include "llvm/Analysis/CaptureTracking.h"
29 #include "llvm/Analysis/LazyCallGraph.h"
30 #include "llvm/Analysis/MemoryBuiltins.h"
31 #include "llvm/Analysis/MemoryLocation.h"
32 #include "llvm/Analysis/ValueTracking.h"
33 #include "llvm/IR/Argument.h"
34 #include "llvm/IR/Attributes.h"
35 #include "llvm/IR/BasicBlock.h"
36 #include "llvm/IR/CallSite.h"
37 #include "llvm/IR/Constant.h"
38 #include "llvm/IR/Constants.h"
39 #include "llvm/IR/Function.h"
40 #include "llvm/IR/InstIterator.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Metadata.h"
46 #include "llvm/IR/PassManager.h"
47 #include "llvm/IR/Type.h"
48 #include "llvm/IR/Use.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/Pass.h"
52 #include "llvm/Support/Casting.h"
53 #include "llvm/Support/CommandLine.h"
54 #include "llvm/Support/Compiler.h"
55 #include "llvm/Support/Debug.h"
56 #include "llvm/Support/ErrorHandling.h"
57 #include "llvm/Support/raw_ostream.h"
58 #include "llvm/Transforms/IPO.h"
59 #include <cassert>
60 #include <iterator>
61 #include <map>
62 #include <vector>
using namespace llvm;

#define DEBUG_TYPE "functionattrs"

STATISTIC(NumReadNone, "Number of functions marked readnone");
STATISTIC(NumReadOnly, "Number of functions marked readonly");
STATISTIC(NumWriteOnly, "Number of functions marked writeonly");
STATISTIC(NumNoCapture, "Number of arguments marked nocapture");
STATISTIC(NumReturned, "Number of arguments marked returned");
STATISTIC(NumReadNoneArg, "Number of arguments marked readnone");
STATISTIC(NumReadOnlyArg, "Number of arguments marked readonly");
STATISTIC(NumNoAlias, "Number of function returns marked noalias");
STATISTIC(NumNonNullReturn, "Number of function returns marked nonnull");
STATISTIC(NumNoRecurse, "Number of functions marked as norecurse");
STATISTIC(NumNoUnwind, "Number of functions marked as nounwind");
STATISTIC(NumNoFree, "Number of functions marked as nofree");

static cl::opt<bool> EnableNonnullArgPropagation(
    "enable-nonnull-arg-prop", cl::init(true), cl::Hidden,
    cl::desc("Try to propagate nonnull argument attributes from callsites to "
             "caller functions."));

static cl::opt<bool> DisableNoUnwindInference(
    "disable-nounwind-inference", cl::Hidden,
    cl::desc("Stop inferring nounwind attribute during function-attrs pass"));

static cl::opt<bool> DisableNoFreeInference(
    "disable-nofree-inference", cl::Hidden,
    cl::desc("Stop inferring nofree attribute during function-attrs pass"));

namespace {

using SCCNodeSet = SmallSetVector<Function *, 8>;

} // end anonymous namespace
/// Returns the memory access attribute for function F using AAR for AA results,
/// where SCCNodes is the current SCC.
///
/// If ThisBody is true, this function may examine the function body and will
/// return a result pertaining to this copy of the function. If it is false, the
/// result will be based only on AA results for the function declaration; it
/// will be assumed that some other (perhaps less optimized) version of the
/// function may be selected at link time.
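///
/// For example (illustrative IR, not from the LLVM test suite): a function
/// whose only accesses touch its own non-escaping alloca is still
/// MAK_ReadNone, because function-local memory is skipped via
/// pointsToConstantMemory(OrLocal=true):
///   define i32 @f() {
///     %x = alloca i32
///     store i32 42, i32* %x
///     %v = load i32, i32* %x
///     ret i32 %v
///   }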
static MemoryAccessKind checkFunctionMemoryAccess(Function &F, bool ThisBody,
                                                  AAResults &AAR,
                                                  const SCCNodeSet &SCCNodes) {
  FunctionModRefBehavior MRB = AAR.getModRefBehavior(&F);
  if (MRB == FMRB_DoesNotAccessMemory)
    // Already perfect!
    return MAK_ReadNone;

  if (!ThisBody) {
    if (AliasAnalysis::onlyReadsMemory(MRB))
      return MAK_ReadOnly;

    if (AliasAnalysis::doesNotReadMemory(MRB))
      return MAK_WriteOnly;

    // Conservatively assume it reads and writes to memory.
    return MAK_MayWrite;
  }

  // Scan the function body for instructions that may read or write memory.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (inst_iterator II = inst_begin(F), E = inst_end(F); II != E; ++II) {
    Instruction *I = &*II;

    // Some instructions can be ignored even if they read or write memory.
    // Detect these now, skipping to the next instruction if one is found.
    if (auto *Call = dyn_cast<CallBase>(I)) {
      // Ignore calls to functions in the same SCC, as long as the call sites
      // don't have operand bundles. Calls with operand bundles are allowed to
      // have memory effects not described by the memory effects of the call
      // target.
      if (!Call->hasOperandBundles() && Call->getCalledFunction() &&
          SCCNodes.count(Call->getCalledFunction()))
        continue;
      FunctionModRefBehavior MRB = AAR.getModRefBehavior(Call);
      ModRefInfo MRI = createModRefInfo(MRB);

      // If the call doesn't access memory, we're done.
      if (isNoModRef(MRI))
        continue;

      if (!AliasAnalysis::onlyAccessesArgPointees(MRB)) {
        // The call could access any memory. If that includes writes, note it.
        if (isModSet(MRI))
          WritesMemory = true;
        // If it reads, note it.
        if (isRefSet(MRI))
          ReadsMemory = true;
        continue;
      }

      // Check whether all pointer arguments point to local memory, and
      // ignore calls that only access local memory.
      for (CallSite::arg_iterator CI = Call->arg_begin(), CE = Call->arg_end();
           CI != CE; ++CI) {
        Value *Arg = *CI;
        if (!Arg->getType()->isPtrOrPtrVectorTy())
          continue;

        AAMDNodes AAInfo;
        I->getAAMetadata(AAInfo);
        MemoryLocation Loc(Arg, LocationSize::unknown(), AAInfo);

        // Skip accesses to local or constant memory as they don't impact the
        // externally visible mod/ref behavior.
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;

        if (isModSet(MRI))
          // Writes non-local memory.
          WritesMemory = true;
        if (isRefSet(MRI))
          // Ok, it reads non-local memory.
          ReadsMemory = true;
      }
      continue;
    } else if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
      // Ignore non-volatile loads from local memory. (Atomic is okay here.)
      if (!LI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(LI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
      // Ignore non-volatile stores to local memory. (Atomic is okay here.)
      if (!SI->isVolatile()) {
        MemoryLocation Loc = MemoryLocation::get(SI);
        if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
          continue;
      }
    } else if (VAArgInst *VI = dyn_cast<VAArgInst>(I)) {
      // Ignore vaargs on local memory.
      MemoryLocation Loc = MemoryLocation::get(VI);
      if (AAR.pointsToConstantMemory(Loc, /*OrLocal=*/true))
        continue;
    }

    // Any remaining instructions need to be taken seriously! Check if they
    // read or write memory.
    //
    // Writes memory, remember that.
    WritesMemory |= I->mayWriteToMemory();

    // If this instruction may read memory, remember that.
    ReadsMemory |= I->mayReadFromMemory();
  }

  if (WritesMemory) {
    if (!ReadsMemory)
      return MAK_WriteOnly;
    else
      return MAK_MayWrite;
  }

  return ReadsMemory ? MAK_ReadOnly : MAK_ReadNone;
}

MemoryAccessKind llvm::computeFunctionBodyMemoryAccess(Function &F,
                                                       AAResults &AAR) {
  return checkFunctionMemoryAccess(F, /*ThisBody=*/true, AAR, {});
}

/// Deduce readonly/readnone attributes for the SCC.
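///
/// For example (illustrative IR): in a singleton SCC containing only
///   define i32 @get(i32* %p) {
///     %v = load i32, i32* %p
///     ret i32 %v
///   }
/// the body scan sees reads but no writes, so @get is marked readonly; had
/// there been no memory accesses at all, it would be marked readnone.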
template <typename AARGetterT>
static bool addReadAttrs(const SCCNodeSet &SCCNodes, AARGetterT &&AARGetter) {
  // Check if any of the functions in the SCC read or write memory. If they
  // write memory then they can't be marked readnone or readonly.
  bool ReadsMemory = false;
  bool WritesMemory = false;
  for (Function *F : SCCNodes) {
    // Call the callable parameter to look up AA results for this function.
    AAResults &AAR = AARGetter(*F);

    // Non-exact function definitions may not be selected at link time, and an
    // alternative version that writes to memory may be selected. See the
    // comment on GlobalValue::isDefinitionExact for more details.
    switch (checkFunctionMemoryAccess(*F, F->hasExactDefinition(),
                                      AAR, SCCNodes)) {
    case MAK_MayWrite:
      return false;
    case MAK_ReadOnly:
      ReadsMemory = true;
      break;
    case MAK_WriteOnly:
      WritesMemory = true;
      break;
    case MAK_ReadNone:
      // Nothing to do!
      break;
    }
  }

  // If the SCC contains both functions that read and functions that write, then
  // we cannot add readonly attributes.
  if (ReadsMemory && WritesMemory)
    return false;

  // Success! Functions in this SCC do not access memory, or only read memory.
  // Give them the appropriate attribute.
  bool MadeChange = false;

  for (Function *F : SCCNodes) {
    if (F->doesNotAccessMemory())
      // Already perfect!
      continue;

    if (F->onlyReadsMemory() && ReadsMemory)
      // No change.
      continue;

    if (F->doesNotReadMemory() && WritesMemory)
      continue;

    MadeChange = true;

    // Clear out any existing attributes.
    F->removeFnAttr(Attribute::ReadOnly);
    F->removeFnAttr(Attribute::ReadNone);
    F->removeFnAttr(Attribute::WriteOnly);

    if (!WritesMemory && !ReadsMemory) {
      // Clear out any "access range attributes" if readnone was deduced.
      F->removeFnAttr(Attribute::ArgMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOnly);
      F->removeFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
    }

    // Add in the new attribute.
    if (WritesMemory && !ReadsMemory)
      F->addFnAttr(Attribute::WriteOnly);
    else
      F->addFnAttr(ReadsMemory ? Attribute::ReadOnly : Attribute::ReadNone);

    if (WritesMemory && !ReadsMemory)
      ++NumWriteOnly;
    else if (ReadsMemory)
      ++NumReadOnly;
    else
      ++NumReadNone;
  }

  return MadeChange;
}

namespace {

/// For a given pointer Argument, this retains a list of Arguments of functions
/// in the same SCC that the pointer data flows into. We use this to build an
/// SCC of the arguments.
struct ArgumentGraphNode {
  Argument *Definition;
  SmallVector<ArgumentGraphNode *, 4> Uses;
};

class ArgumentGraph {
  // We store pointers to ArgumentGraphNode objects, so it's important that
  // they not move around upon insert.
  using ArgumentMapTy = std::map<Argument *, ArgumentGraphNode>;

  ArgumentMapTy ArgumentMap;

  // There is no root node for the argument graph, in fact:
  //   void f(int *x, int *y) { if (...) f(x, y); }
  // is an example where the graph is disconnected. The SCCIterator requires a
  // single entry point, so we maintain a fake ("synthetic") root node that
  // uses every node. Because the graph is directed and nothing points into
  // the root, it will not participate in any SCCs (except for its own).
  ArgumentGraphNode SyntheticRoot;

public:
  ArgumentGraph() { SyntheticRoot.Definition = nullptr; }

  using iterator = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  iterator begin() { return SyntheticRoot.Uses.begin(); }
  iterator end() { return SyntheticRoot.Uses.end(); }
  ArgumentGraphNode *getEntryNode() { return &SyntheticRoot; }

  ArgumentGraphNode *operator[](Argument *A) {
    ArgumentGraphNode &Node = ArgumentMap[A];
    Node.Definition = A;
    SyntheticRoot.Uses.push_back(&Node);
    return &Node;
  }
};

/// This tracker checks whether callees are in the SCC, and if so it does not
/// consider that a capture, instead adding it to the "Uses" list and
/// continuing with the analysis.
struct ArgumentUsesTracker : public CaptureTracker {
  ArgumentUsesTracker(const SCCNodeSet &SCCNodes) : SCCNodes(SCCNodes) {}

  void tooManyUses() override { Captured = true; }

  bool captured(const Use *U) override {
    CallSite CS(U->getUser());
    if (!CS.getInstruction()) {
      Captured = true;
      return true;
    }

    Function *F = CS.getCalledFunction();
    if (!F || !F->hasExactDefinition() || !SCCNodes.count(F)) {
      Captured = true;
      return true;
    }

    // Note: the callee and the two successor blocks *follow* the argument
    // operands. This means there is no need to adjust UseIndex to account for
    // these.

    unsigned UseIndex =
        std::distance(const_cast<const Use *>(CS.arg_begin()), U);

    assert(UseIndex < CS.data_operands_size() &&
           "Indirect function calls should have been filtered above!");

    if (UseIndex >= CS.getNumArgOperands()) {
      // Data operand, but not an argument operand -- must be a bundle operand
      assert(CS.hasOperandBundles() && "Must be!");

      // CaptureTracking told us that we're being captured by an operand bundle
      // use. In this case it does not matter if the callee is within our SCC
      // or not -- we've been captured in some unknown way, and we have to be
      // conservative.
      Captured = true;
      return true;
    }

    if (UseIndex >= F->arg_size()) {
      assert(F->isVarArg() && "More params than args in non-varargs call");
      Captured = true;
      return true;
    }

    Uses.push_back(&*std::next(F->arg_begin(), UseIndex));
    return false;
  }

  // True only if certainly captured (used outside our SCC).
  bool Captured = false;

  // Uses within our SCC.
  SmallVector<Argument *, 4> Uses;

  const SCCNodeSet &SCCNodes;
};

} // end anonymous namespace
namespace llvm {

template <> struct GraphTraits<ArgumentGraphNode *> {
  using NodeRef = ArgumentGraphNode *;
  using ChildIteratorType = SmallVectorImpl<ArgumentGraphNode *>::iterator;

  static NodeRef getEntryNode(NodeRef A) { return A; }
  static ChildIteratorType child_begin(NodeRef N) { return N->Uses.begin(); }
  static ChildIteratorType child_end(NodeRef N) { return N->Uses.end(); }
};

template <>
struct GraphTraits<ArgumentGraph *> : public GraphTraits<ArgumentGraphNode *> {
  static NodeRef getEntryNode(ArgumentGraph *AG) { return AG->getEntryNode(); }

  static ChildIteratorType nodes_begin(ArgumentGraph *AG) {
    return AG->begin();
  }

  static ChildIteratorType nodes_end(ArgumentGraph *AG) { return AG->end(); }
};

} // end namespace llvm

/// Returns Attribute::None, Attribute::ReadOnly or Attribute::ReadNone.
static Attribute::AttrKind
determinePointerReadAttrs(Argument *A,
                          const SmallPtrSet<Argument *, 8> &SCCNodes) {
  SmallVector<Use *, 32> Worklist;
  SmallPtrSet<Use *, 32> Visited;

  // inalloca arguments are always clobbered by the call.
  if (A->hasInAllocaAttr())
    return Attribute::None;

  bool IsRead = false;
  // We don't need to track IsWritten. If A is written to, return immediately.

  for (Use &U : A->uses()) {
    Visited.insert(&U);
    Worklist.push_back(&U);
  }

  while (!Worklist.empty()) {
    Use *U = Worklist.pop_back_val();
    Instruction *I = cast<Instruction>(U->getUser());

    switch (I->getOpcode()) {
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::PHI:
    case Instruction::Select:
    case Instruction::AddrSpaceCast:
      // The original value is not read/written via this if the new value isn't.
      for (Use &UU : I->uses())
        if (Visited.insert(&UU).second)
          Worklist.push_back(&UU);
      break;

    case Instruction::Call:
    case Instruction::Invoke: {
      bool Captures = true;

      if (I->getType()->isVoidTy())
        Captures = false;

      auto AddUsersToWorklistIfCapturing = [&] {
        if (Captures)
          for (Use &UU : I->uses())
            if (Visited.insert(&UU).second)
              Worklist.push_back(&UU);
      };

      CallSite CS(I);
      if (CS.doesNotAccessMemory()) {
        AddUsersToWorklistIfCapturing();
        continue;
      }

      Function *F = CS.getCalledFunction();
      if (!F) {
        if (CS.onlyReadsMemory()) {
          IsRead = true;
          AddUsersToWorklistIfCapturing();
          continue;
        }
        return Attribute::None;
      }

      // Note: the callee and the two successor blocks *follow* the argument
      // operands. This means there is no need to adjust UseIndex to account
      // for these.

      unsigned UseIndex = std::distance(CS.arg_begin(), U);

      // U cannot be the callee operand use: since we're exploring the
      // transitive uses of an Argument, having such a use be a callee would
      // imply the CallSite is an indirect call or invoke; and we'd take the
      // early exit above.
      assert(UseIndex < CS.data_operands_size() &&
             "Data operand use expected!");

      bool IsOperandBundleUse = UseIndex >= CS.getNumArgOperands();

      if (UseIndex >= F->arg_size() && !IsOperandBundleUse) {
        assert(F->isVarArg() && "More params than args in non-varargs call");
        return Attribute::None;
      }

      Captures &= !CS.doesNotCapture(UseIndex);

      // Since the optimizer (by design) cannot see the data flow corresponding
      // to an operand bundle use, these cannot participate in the optimistic
      // SCC analysis. Instead, we model the operand bundle uses as arguments
      // in a call to a function external to the SCC.
      if (IsOperandBundleUse ||
          !SCCNodes.count(&*std::next(F->arg_begin(), UseIndex))) {

        // The accessors used on CallSite here do the right thing for calls and
        // invokes with operand bundles.

        if (!CS.onlyReadsMemory() && !CS.onlyReadsMemory(UseIndex))
          return Attribute::None;
        if (!CS.doesNotAccessMemory(UseIndex))
          IsRead = true;
      }

      AddUsersToWorklistIfCapturing();
      break;
    }

    case Instruction::Load:
      // A volatile load has side effects beyond what readonly can be relied
      // upon.
      if (cast<LoadInst>(I)->isVolatile())
        return Attribute::None;

      IsRead = true;
      break;

    case Instruction::ICmp:
    case Instruction::Ret:
      break;

    default:
      return Attribute::None;
    }
  }

  return IsRead ? Attribute::ReadOnly : Attribute::ReadNone;
}

/// Deduce returned attributes for the SCC.
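///
/// For example (illustrative IR): the only return of
///   define i8* @pass(i8* %p) {
///     ret i8* %p
///   }
/// always yields the first argument, so %p can be marked 'returned'.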
static bool addArgumentReturnedAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  // Check each function in turn, determining if an argument is always returned.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    if (F->getReturnType()->isVoidTy())
      continue;

    // There is nothing to do if an argument is already marked as 'returned'.
    if (llvm::any_of(F->args(),
                     [](const Argument &Arg) { return Arg.hasReturnedAttr(); }))
      continue;

    auto FindRetArg = [&]() -> Value * {
      Value *RetArg = nullptr;
      for (BasicBlock &BB : *F)
        if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator())) {
          // Note that stripPointerCasts should look through functions with
          // returned arguments.
          Value *RetVal = Ret->getReturnValue()->stripPointerCasts();
          if (!isa<Argument>(RetVal) || RetVal->getType() != F->getReturnType())
            return nullptr;

          if (!RetArg)
            RetArg = RetVal;
          else if (RetArg != RetVal)
            return nullptr;
        }

      return RetArg;
    };

    if (Value *RetArg = FindRetArg()) {
      auto *A = cast<Argument>(RetArg);
      A->addAttr(Attribute::Returned);
      ++NumReturned;
      Changed = true;
    }
  }

  return Changed;
}

/// If a callsite has arguments that are also arguments to the parent function,
/// try to propagate attributes from the callsite's arguments to the parent's
/// arguments. This may be important because inlining can cause information loss
/// when attribute knowledge disappears with the inlined call.
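///
/// For example (illustrative IR): given
///   declare void @use(i8* nonnull)
///   define void @caller(i8* %p) {
///     call void @use(i8* %p)
///     ret void
///   }
/// the call in the entry block is guaranteed to execute, so %p must be
/// non-null on every path through @caller and can be marked nonnull too.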
static bool addArgumentAttrsFromCallsites(Function &F) {
  if (!EnableNonnullArgPropagation)
    return false;

  bool Changed = false;

  // For an argument attribute to transfer from a callsite to the parent, the
  // call must be guaranteed to execute every time the parent is called.
  // Conservatively, just check for calls in the entry block that are guaranteed
  // to execute.
  // TODO: This could be enhanced by testing if the callsite post-dominates the
  // entry block or by doing simple forward walks or backward walks to the
  // callsite.
  BasicBlock &Entry = F.getEntryBlock();
  for (Instruction &I : Entry) {
    if (auto CS = CallSite(&I)) {
      if (auto *CalledFunc = CS.getCalledFunction()) {
        for (auto &CSArg : CalledFunc->args()) {
          if (!CSArg.hasNonNullAttr())
            continue;

          // If the non-null callsite argument operand is an argument to 'F'
          // (the caller) and the call is guaranteed to execute, then the value
          // must be non-null throughout 'F'.
          auto *FArg = dyn_cast<Argument>(CS.getArgOperand(CSArg.getArgNo()));
          if (FArg && !FArg->hasNonNullAttr()) {
            FArg->addAttr(Attribute::NonNull);
            Changed = true;
          }
        }
      }
    }
    if (!isGuaranteedToTransferExecutionToSuccessor(&I))
      break;
  }

  return Changed;
}

static bool addReadAttr(Argument *A, Attribute::AttrKind R) {
  assert((R == Attribute::ReadOnly || R == Attribute::ReadNone)
         && "Must be a Read attribute.");
  assert(A && "Argument must not be null.");

  // If the argument already has the attribute, nothing needs to be done.
  if (A->hasAttribute(R))
    return false;

  // Otherwise, remove potentially conflicting attribute, add the new one,
  // and update statistics.
  A->removeAttr(Attribute::WriteOnly);
  A->removeAttr(Attribute::ReadOnly);
  A->removeAttr(Attribute::ReadNone);
  A->addAttr(R);
  R == Attribute::ReadOnly ? ++NumReadOnlyArg : ++NumReadNoneArg;
  return true;
}

/// Deduce nocapture attributes for the SCC.
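///
/// For example (illustrative IR): %p below is only loaded from and never
/// stored, returned, or passed to a capturing callee, so it can be marked
/// nocapture:
///   define i32 @deref(i32* %p) {
///     %v = load i32, i32* %p
///     ret i32 %v
///   }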
static bool addArgumentAttrs(const SCCNodeSet &SCCNodes) {
  bool Changed = false;

  ArgumentGraph AG;

  // Check each function in turn, determining which pointer arguments are not
  // captured.
  for (Function *F : SCCNodes) {
    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      continue;

    Changed |= addArgumentAttrsFromCallsites(*F);

    // Functions that are readonly (or readnone) and nounwind and don't return
    // a value can't capture arguments. Don't analyze them.
    if (F->onlyReadsMemory() && F->doesNotThrow() &&
        F->getReturnType()->isVoidTy()) {
      for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
           ++A) {
        if (A->getType()->isPointerTy() && !A->hasNoCaptureAttr()) {
          A->addAttr(Attribute::NoCapture);
          ++NumNoCapture;
          Changed = true;
        }
      }
      continue;
    }

    for (Function::arg_iterator A = F->arg_begin(), E = F->arg_end(); A != E;
         ++A) {
      if (!A->getType()->isPointerTy())
        continue;
      bool HasNonLocalUses = false;
      if (!A->hasNoCaptureAttr()) {
        ArgumentUsesTracker Tracker(SCCNodes);
        PointerMayBeCaptured(&*A, &Tracker);
        if (!Tracker.Captured) {
          if (Tracker.Uses.empty()) {
            // If it's trivially not captured, mark it nocapture now.
            A->addAttr(Attribute::NoCapture);
            ++NumNoCapture;
            Changed = true;
          } else {
            // If it's not trivially captured and not trivially not captured,
            // then it must be calling into another function in our SCC. Save
            // its particulars for Argument-SCC analysis later.
            ArgumentGraphNode *Node = AG[&*A];
            for (Argument *Use : Tracker.Uses) {
              Node->Uses.push_back(AG[Use]);
              if (Use != &*A)
                HasNonLocalUses = true;
            }
          }
        }
        // Otherwise, it's captured. Don't bother doing SCC analysis on it.
      }
      if (!HasNonLocalUses && !A->onlyReadsMemory()) {
        // Can we determine that it's readonly/readnone without doing an SCC?
        // Note that we don't allow any calls at all here, or else our result
        // will be dependent on the iteration order through the functions in the
        // SCC.
        SmallPtrSet<Argument *, 8> Self;
        Self.insert(&*A);
        Attribute::AttrKind R = determinePointerReadAttrs(&*A, Self);
        if (R != Attribute::None)
          Changed = addReadAttr(A, R);
      }
    }
  }

  // The graph we've collected is partial because we stopped scanning for
  // argument uses once we solved the argument trivially. These partial nodes
  // show up as ArgumentGraphNode objects with an empty Uses list, and for
  // these nodes the final decision about whether they capture has already been
  // made. If the definition doesn't have a 'nocapture' attribute by now, it
  // captures.

  for (scc_iterator<ArgumentGraph *> I = scc_begin(&AG); !I.isAtEnd(); ++I) {
    const std::vector<ArgumentGraphNode *> &ArgumentSCC = *I;
    if (ArgumentSCC.size() == 1) {
      if (!ArgumentSCC[0]->Definition)
        continue; // synthetic root node

      // eg. "void f(int* x) { if (...) f(x); }"
      if (ArgumentSCC[0]->Uses.size() == 1 &&
          ArgumentSCC[0]->Uses[0] == ArgumentSCC[0]) {
        Argument *A = ArgumentSCC[0]->Definition;
        A->addAttr(Attribute::NoCapture);
        ++NumNoCapture;
        Changed = true;
      }
      continue;
    }

    bool SCCCaptured = false;
    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *Node = *I;
      if (Node->Uses.empty()) {
        if (!Node->Definition->hasNoCaptureAttr())
          SCCCaptured = true;
      }
    }
    if (SCCCaptured)
      continue;

    SmallPtrSet<Argument *, 8> ArgumentSCCNodes;
    // Fill ArgumentSCCNodes with the elements of the ArgumentSCC. Used for
    // quickly looking up whether a given Argument is in this ArgumentSCC.
    for (ArgumentGraphNode *I : ArgumentSCC) {
      ArgumentSCCNodes.insert(I->Definition);
    }

    for (auto I = ArgumentSCC.begin(), E = ArgumentSCC.end();
         I != E && !SCCCaptured; ++I) {
      ArgumentGraphNode *N = *I;
      for (ArgumentGraphNode *Use : N->Uses) {
        Argument *A = Use->Definition;
        if (A->hasNoCaptureAttr() || ArgumentSCCNodes.count(A))
          continue;
        SCCCaptured = true;
        break;
      }
    }
    if (SCCCaptured)
      continue;

    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      A->addAttr(Attribute::NoCapture);
      ++NumNoCapture;
      Changed = true;
    }

    // We also want to compute readonly/readnone. With a small number of false
    // negatives, we can assume that any pointer which is captured isn't going
    // to be provably readonly or readnone, since by definition we can't
    // analyze all uses of a captured pointer.
    //
    // The false negatives happen when the pointer is captured by a function
    // that promises readonly/readnone behaviour on the pointer, then the
    // pointer's lifetime ends before anything that writes to arbitrary memory.
    // Also, a readonly/readnone pointer may be returned, but returning a
    // pointer is capturing it.

    Attribute::AttrKind ReadAttr = Attribute::ReadNone;
    for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
      Argument *A = ArgumentSCC[i]->Definition;
      Attribute::AttrKind K = determinePointerReadAttrs(A, ArgumentSCCNodes);
      if (K == Attribute::ReadNone)
        continue;
      if (K == Attribute::ReadOnly) {
        ReadAttr = Attribute::ReadOnly;
        continue;
      }
      ReadAttr = K;
      break;
    }

    if (ReadAttr != Attribute::None) {
      for (unsigned i = 0, e = ArgumentSCC.size(); i != e; ++i) {
        Argument *A = ArgumentSCC[i]->Definition;
        Changed = addReadAttr(A, ReadAttr);
      }
    }
  }

  return Changed;
}

/// Tests whether a function is "malloc-like".
///
/// A function is "malloc-like" if it returns either null or a pointer that
/// doesn't alias any other pointer visible to the caller.
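///
/// For example (illustrative IR): assuming @malloc is declared with a
/// 'noalias' return, @wrapper only ever returns null or fresh memory, so it
/// is malloc-like:
///   declare noalias i8* @malloc(i64)
///   define i8* @wrapper(i64 %n) {
///     %mem = call i8* @malloc(i64 %n)
///     ret i8* %mem
///   }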
static bool isFunctionMallocLike(Function *F, const SCCNodeSet &SCCNodes) {
  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (ReturnInst *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    if (Constant *C = dyn_cast<Constant>(RetVal)) {
      if (!C->isNullValue() && !isa<UndefValue>(C))
        return false;

      continue;
    }

    if (isa<Argument>(RetVal))
      return false;

    if (Instruction *RVI = dyn_cast<Instruction>(RetVal))
      switch (RVI->getOpcode()) {
      // Extend the analysis by looking upwards.
      case Instruction::BitCast:
      case Instruction::GetElementPtr:
      case Instruction::AddrSpaceCast:
        FlowsToReturn.insert(RVI->getOperand(0));
        continue;
      case Instruction::Select: {
        SelectInst *SI = cast<SelectInst>(RVI);
        FlowsToReturn.insert(SI->getTrueValue());
        FlowsToReturn.insert(SI->getFalseValue());
        continue;
      }
      case Instruction::PHI: {
        PHINode *PN = cast<PHINode>(RVI);
        for (Value *IncValue : PN->incoming_values())
          FlowsToReturn.insert(IncValue);
        continue;
      }

      // Check whether the pointer came from an allocation.
      case Instruction::Alloca:
        break;
      case Instruction::Call:
      case Instruction::Invoke: {
        CallSite CS(RVI);
        if (CS.hasRetAttr(Attribute::NoAlias))
          break;
        if (CS.getCalledFunction() && SCCNodes.count(CS.getCalledFunction()))
          break;
        LLVM_FALLTHROUGH;
      }
      default:
        return false; // Did not come from an allocation.
      }

    if (PointerMayBeCaptured(RetVal, false, /*StoreCaptures=*/false))
      return false;
  }

  return true;
}

/// Deduce noalias attributes for the SCC.
static bool addNoAliasAttrs(const SCCNodeSet &SCCNodes) {
  // Check each function in turn, determining which functions return noalias
  // pointers.
  for (Function *F : SCCNodes) {
    // Already noalias.
    if (F->returnDoesNotAlias())
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate noalias return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    if (!isFunctionMallocLike(F, SCCNodes))
      return false;
  }

  bool MadeChange = false;
  for (Function *F : SCCNodes) {
    if (F->returnDoesNotAlias() ||
        !F->getReturnType()->isPointerTy())
      continue;

    F->setReturnDoesNotAlias();
    ++NumNoAlias;
    MadeChange = true;
  }

  return MadeChange;
}

/// Tests whether this function is known to not return null.
///
/// Requires that the function returns a pointer.
///
/// Returns true if it believes the function will not return a null, and sets
/// \p Speculative based on whether the returned conclusion is a speculative
/// conclusion due to SCC calls.
static bool isReturnNonNull(Function *F, const SCCNodeSet &SCCNodes,
                            bool &Speculative) {
  assert(F->getReturnType()->isPointerTy() &&
         "nonnull only meaningful on pointer types");
  Speculative = false;

  SmallSetVector<Value *, 8> FlowsToReturn;
  for (BasicBlock &BB : *F)
    if (auto *Ret = dyn_cast<ReturnInst>(BB.getTerminator()))
      FlowsToReturn.insert(Ret->getReturnValue());

  auto &DL = F->getParent()->getDataLayout();

  for (unsigned i = 0; i != FlowsToReturn.size(); ++i) {
    Value *RetVal = FlowsToReturn[i];

    // If this value is locally known to be non-null, we're good
    if (isKnownNonZero(RetVal, DL))
      continue;

    // Otherwise, we need to look upwards since we can't make any local
    // conclusions.
    Instruction *RVI = dyn_cast<Instruction>(RetVal);
    if (!RVI)
      return false;
    switch (RVI->getOpcode()) {
    // Extend the analysis by looking upwards.
    case Instruction::BitCast:
    case Instruction::GetElementPtr:
    case Instruction::AddrSpaceCast:
      FlowsToReturn.insert(RVI->getOperand(0));
      continue;
    case Instruction::Select: {
      SelectInst *SI = cast<SelectInst>(RVI);
      FlowsToReturn.insert(SI->getTrueValue());
      FlowsToReturn.insert(SI->getFalseValue());
      continue;
    }
    case Instruction::PHI: {
      PHINode *PN = cast<PHINode>(RVI);
      for (int i = 0, e = PN->getNumIncomingValues(); i != e; ++i)
        FlowsToReturn.insert(PN->getIncomingValue(i));
      continue;
    }
    case Instruction::Call:
    case Instruction::Invoke: {
      CallSite CS(RVI);
      Function *Callee = CS.getCalledFunction();
      // A call to a node within the SCC is assumed to return null until
      // proven otherwise
      if (Callee && SCCNodes.count(Callee)) {
        Speculative = true;
        continue;
      }
      return false;
    }
    default:
      return false; // Unknown source, may be null
    }
    llvm_unreachable("should have either continued or returned");
  }

  return true;
}

/// Deduce nonnull attributes for the SCC.
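///
/// For example (illustrative IR): @addr can only return the address of a
/// global, which isKnownNonZero proves non-null, so its return value can be
/// marked nonnull:
///   @g = global i32 0
///   define i32* @addr() {
///     ret i32* @g
///   }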
static bool addNonNullAttrs(const SCCNodeSet &SCCNodes) {
  // Speculate that all functions in the SCC return only nonnull
  // pointers. We may refute this as we analyze functions.
  bool SCCReturnsNonNull = true;

  bool MadeChange = false;

  // Check each function in turn, determining which functions return nonnull
  // pointers.
  for (Function *F : SCCNodes) {
    // Already nonnull.
    if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                        Attribute::NonNull))
      continue;

    // We can infer and propagate function attributes only when we know that the
    // definition we'll get at link time is *exactly* the definition we see now.
    // For more details, see GlobalValue::mayBeDerefined.
    if (!F->hasExactDefinition())
      return false;

    // We annotate nonnull return values, which are only applicable to
    // pointer types.
    if (!F->getReturnType()->isPointerTy())
      continue;

    bool Speculative = false;
    if (isReturnNonNull(F, SCCNodes, Speculative)) {
      if (!Speculative) {
        // Mark the function eagerly since we may discover a function
        // which prevents us from speculating about the entire SCC
        LLVM_DEBUG(dbgs() << "Eagerly marking " << F->getName()
                          << " as nonnull\n");
        F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
        ++NumNonNullReturn;
        MadeChange = true;
      }
      continue;
    }
    // At least one function returns something which could be null, can't
    // speculate any more.
    SCCReturnsNonNull = false;
  }

  if (SCCReturnsNonNull) {
    for (Function *F : SCCNodes) {
      if (F->getAttributes().hasAttribute(AttributeList::ReturnIndex,
                                          Attribute::NonNull) ||
          !F->getReturnType()->isPointerTy())
        continue;

      LLVM_DEBUG(dbgs() << "SCC marking " << F->getName() << " as nonnull\n");
      F->addAttribute(AttributeList::ReturnIndex, Attribute::NonNull);
      ++NumNonNullReturn;
      MadeChange = true;
    }
  }

  return MadeChange;
}

namespace {

/// Collects a set of attribute inference requests and performs them all in one
/// go on a single SCC Node. Inference involves scanning function bodies
/// looking for instructions that violate attribute assumptions.
/// As soon as all the bodies are fine we are free to set the attribute.
/// Customization of inference for individual attributes is performed by
/// providing a handful of predicates for each attribute.
class AttributeInferer {
public:
  /// Describes a request for inference of a single attribute.
  struct InferenceDescriptor {

    /// Returns true if this function does not have to be handled.
    /// General intent for this predicate is to provide an optimization
    /// for functions that do not need this attribute inference at all
    /// (say, for functions that already have the attribute).
    std::function<bool(const Function &)> SkipFunction;

    /// Returns true if this instruction violates attribute assumptions.
    std::function<bool(Instruction &)> InstrBreaksAttribute;

    /// Sets the inferred attribute for this function.
    std::function<void(Function &)> SetAttribute;

    /// Attribute we derive.
    Attribute::AttrKind AKind;

    /// If true, only "exact" definitions can be used to infer this attribute.
    /// See GlobalValue::isDefinitionExact.
    bool RequiresExactDefinition;

    InferenceDescriptor(Attribute::AttrKind AK,
                        std::function<bool(const Function &)> SkipFunc,
                        std::function<bool(Instruction &)> InstrScan,
                        std::function<void(Function &)> SetAttr,
                        bool ReqExactDef)
        : SkipFunction(SkipFunc), InstrBreaksAttribute(InstrScan),
          SetAttribute(SetAttr), AKind(AK),
          RequiresExactDefinition(ReqExactDef) {}
  };

private:
  SmallVector<InferenceDescriptor, 4> InferenceDescriptors;

public:
  void registerAttrInference(InferenceDescriptor AttrInference) {
    InferenceDescriptors.push_back(AttrInference);
  }

  bool run(const SCCNodeSet &SCCNodes);
};

/// Perform all the requested attribute inference actions according to the
/// attribute predicates stored before.
bool AttributeInferer::run(const SCCNodeSet &SCCNodes) {
  SmallVector<InferenceDescriptor, 4> InferInSCC = InferenceDescriptors;
  // Go through all the functions in SCC and check corresponding attribute
  // assumptions for each of them. Attributes that are invalid for this SCC
  // will be removed from InferInSCC.
  for (Function *F : SCCNodes) {

    // No attributes whose assumptions are still valid - done.
    if (InferInSCC.empty())
      return false;

    // Check if our attributes ever need scanning/can be scanned.
    llvm::erase_if(InferInSCC, [F](const InferenceDescriptor &ID) {
      if (ID.SkipFunction(*F))
        return false;

      // Remove from further inference (invalidate) when visiting a function
      // that has no instructions to scan/has an unsuitable definition.
      return F->isDeclaration() ||
             (ID.RequiresExactDefinition && !F->hasExactDefinition());
    });

    // For each attribute still in InferInSCC that doesn't explicitly skip F,
    // set up the F instructions scan to verify assumptions of the attribute.
    SmallVector<InferenceDescriptor, 4> InferInThisFunc;
    llvm::copy_if(
        InferInSCC, std::back_inserter(InferInThisFunc),
        [F](const InferenceDescriptor &ID) { return !ID.SkipFunction(*F); });

    if (InferInThisFunc.empty())
      continue;

    // Start instruction scan.
    for (Instruction &I : instructions(*F)) {
      llvm::erase_if(InferInThisFunc, [&](const InferenceDescriptor &ID) {
        if (!ID.InstrBreaksAttribute(I))
          return false;
        // Remove attribute from further inference on any other functions
        // because attribute assumptions have just been violated.
        llvm::erase_if(InferInSCC, [&ID](const InferenceDescriptor &D) {
          return D.AKind == ID.AKind;
        });
        // Remove attribute from the rest of current instruction scan.
        return true;
      });

      if (InferInThisFunc.empty())
        break;
    }
  }

  if (InferInSCC.empty())
    return false;

  bool Changed = false;
  for (Function *F : SCCNodes)
    // At this point InferInSCC contains only functions that were either:
    //   - explicitly skipped from scan/inference, or
    //   - verified to have no instructions that break attribute assumptions.
    // Hence we just go and force the attribute for all non-skipped functions.
    for (auto &ID : InferInSCC) {
      if (ID.SkipFunction(*F))
        continue;
      Changed = true;
      ID.SetAttribute(*F);
    }
  return Changed;
}

} // end anonymous namespace
/// Helper for non-Convergent inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonConvergent(Instruction &I,
                                     const SCCNodeSet &SCCNodes) {
  const CallSite CS(&I);
  // Breaks non-convergent assumption if CS is a convergent call to a function
  // not in the SCC.
  return CS && CS.isConvergent() && SCCNodes.count(CS.getCalledFunction()) == 0;
}

/// Helper for NoUnwind inference predicate InstrBreaksAttribute.
static bool InstrBreaksNonThrowing(Instruction &I, const SCCNodeSet &SCCNodes) {
  if (!I.mayThrow())
    return false;
  if (const auto *CI = dyn_cast<CallInst>(&I)) {
    if (Function *Callee = CI->getCalledFunction()) {
      // I is a may-throw call to a function inside our SCC. This doesn't
      // invalidate our current working assumption that the SCC is no-throw; we
      // just have to scan that other function.
      if (SCCNodes.count(Callee) > 0)
        return false;
    }
  }
  return true;
}

/// Helper for NoFree inference predicate InstrBreaksAttribute.
static bool InstrBreaksNoFree(Instruction &I, const SCCNodeSet &SCCNodes) {
  CallSite CS(&I);
  if (!CS)
    return false;

  Function *Callee = CS.getCalledFunction();
  if (!Callee)
    return true;

  if (Callee->doesNotFreeMemory())
    return false;

  if (SCCNodes.count(Callee) > 0)
    return false;

  return true;
}

/// Infer attributes from all functions in the SCC by scanning every
/// instruction for compliance to the attribute assumptions. Currently it
/// does:
///   - removal of Convergent attribute
///   - addition of NoUnwind attribute
///   - addition of NoFree attribute
///
/// Returns true if any changes to function attributes were made.
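///
/// For example (illustrative IR): @leaf below contains no instruction that
/// may throw, so it is marked nounwind; since the post-order walk infers that
/// before visiting @wrap, the call in @wrap no longer breaks the non-throwing
/// assumption and @wrap is marked nounwind as well:
///   define void @leaf() {
///     ret void
///   }
///   define void @wrap() {
///     call void @leaf()
///     ret void
///   }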
static bool inferAttrsFromFunctionBodies(const SCCNodeSet &SCCNodes) {

  AttributeInferer AI;

  // Request to remove the convergent attribute from all functions in the SCC
  // if every callsite within the SCC is not convergent (except for calls
  // to functions within the SCC).
  // Note: Removal of the attr from the callsites will happen in
  // InstCombineCalls separately.
  AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
      Attribute::Convergent,
      // Skip non-convergent functions.
      [](const Function &F) { return !F.isConvergent(); },
      // Instructions that break non-convergent assumption.
      [SCCNodes](Instruction &I) {
        return InstrBreaksNonConvergent(I, SCCNodes);
      },
      [](Function &F) {
        LLVM_DEBUG(dbgs() << "Removing convergent attr from fn " << F.getName()
                          << "\n");
        F.setNotConvergent();
      },
      /* RequiresExactDefinition= */ false});

  if (!DisableNoUnwindInference)
    // Request to infer nounwind attribute for all the functions in the SCC if
    // every callsite within the SCC is not throwing (except for calls to
    // functions within the SCC). Note that nounwind attribute suffers from
    // derefinement - results may change depending on how functions are
    // optimized. Thus it can be inferred only from exact definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoUnwind,
        // Skip non-throwing functions.
        [](const Function &F) { return F.doesNotThrow(); },
        // Instructions that break non-throwing assumption.
        [SCCNodes](Instruction &I) {
          return InstrBreaksNonThrowing(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nounwind attr to fn " << F.getName() << "\n");
          F.setDoesNotThrow();
          ++NumNoUnwind;
        },
        /* RequiresExactDefinition= */ true});

  if (!DisableNoFreeInference)
    // Request to infer nofree attribute for all the functions in the SCC if
    // every callsite within the SCC does not directly or indirectly free
    // memory (except for calls to functions within the SCC). Note that nofree
    // attribute suffers from derefinement - results may change depending on
    // how functions are optimized. Thus it can be inferred only from exact
    // definitions.
    AI.registerAttrInference(AttributeInferer::InferenceDescriptor{
        Attribute::NoFree,
        // Skip functions known not to free memory.
        [](const Function &F) { return F.doesNotFreeMemory(); },
        // Instructions that break non-deallocating assumption.
        [SCCNodes](Instruction &I) {
          return InstrBreaksNoFree(I, SCCNodes);
        },
        [](Function &F) {
          LLVM_DEBUG(dbgs()
                     << "Adding nofree attr to fn " << F.getName() << "\n");
          F.setDoesNotFreeMemory();
          ++NumNoFree;
        },
        /* RequiresExactDefinition= */ true});

  // Perform all the requested attribute inference actions.
  return AI.run(SCCNodes);
}

static bool setDoesNotRecurse(Function &F) {
  if (F.doesNotRecurse())
    return false;
  F.setDoesNotRecurse();
  ++NumNoRecurse;
  return true;
}

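/// Deduce norecurse attributes for a singleton SCC.
///
/// For example (illustrative IR): @caller below forms a singleton SCC and
/// only calls a function already known to be norecurse, so it can be marked
/// norecurse as well:
///   define void @callee() norecurse {
///     ret void
///   }
///   define void @caller() {
///     call void @callee()
///     ret void
///   }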
static bool addNoRecurseAttrs(const SCCNodeSet &SCCNodes) {
  // Try and identify functions that do not recurse.

  // If the SCC contains multiple nodes we know for sure there is recursion.
  if (SCCNodes.size() != 1)
    return false;

  Function *F = *SCCNodes.begin();
  if (!F || !F->hasExactDefinition() || F->doesNotRecurse())
    return false;

  // If all of the calls in F are identifiable and are to norecurse functions, F
  // is norecurse. This check also detects self-recursion as F is not currently
  // marked norecurse, so any call from F to F will not be marked norecurse.
  for (auto &BB : *F)
    for (auto &I : BB.instructionsWithoutDebug())
      if (auto CS = CallSite(&I)) {
        Function *Callee = CS.getCalledFunction();
        if (!Callee || Callee == F || !Callee->doesNotRecurse())
          // Function calls a potentially recursive function.
          return false;
      }

  // Every call was to a non-recursive function other than this function, and
  // we have no indirect recursion as the SCC size is one. This function cannot
  // recurse.
  return setDoesNotRecurse(*F);
}

template <typename AARGetterT>
static bool deriveAttrsInPostOrder(SCCNodeSet &SCCNodes,
                                   AARGetterT &&AARGetter,
                                   bool HasUnknownCall) {
  bool Changed = false;

  // Bail if the SCC only contains optnone functions.
  if (SCCNodes.empty())
    return Changed;

  Changed |= addArgumentReturnedAttrs(SCCNodes);
  Changed |= addReadAttrs(SCCNodes, AARGetter);
  Changed |= addArgumentAttrs(SCCNodes);

  // If we have no external nodes participating in the SCC, we can deduce some
  // more precise attributes as well.
  if (!HasUnknownCall) {
    Changed |= addNoAliasAttrs(SCCNodes);
    Changed |= addNonNullAttrs(SCCNodes);
    Changed |= inferAttrsFromFunctionBodies(SCCNodes);
    Changed |= addNoRecurseAttrs(SCCNodes);
  }

  return Changed;
}

PreservedAnalyses PostOrderFunctionAttrsPass::run(LazyCallGraph::SCC &C,
                                                  CGSCCAnalysisManager &AM,
                                                  LazyCallGraph &CG,
                                                  CGSCCUpdateResult &) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();

  // We pass a lambda into functions to wire them up to the analysis manager
  // for getting function analyses.
  auto AARGetter = [&](Function &F) -> AAResults & {
    return FAM.getResult<AAManager>(F);
  };

  // Fill SCCNodes with the elements of the SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool HasUnknownCall = false;
  for (LazyCallGraph::Node &N : C) {
    Function &F = N.getFunction();
    if (F.hasOptNone() || F.hasFnAttribute(Attribute::Naked)) {
      // Treat any function we're trying not to optimize as if it were an
      // indirect call and omit it from the node set used below.
      HasUnknownCall = true;
      continue;
    }
    // Track whether any functions in this SCC have an unknown call edge.
    // Note: if this is ever a performance hit, we can common it with
    // subsequent routines which also do scans over the instructions of the
    // function.
    if (!HasUnknownCall)
      for (Instruction &I : instructions(F))
        if (auto CS = CallSite(&I))
          if (!CS.getCalledFunction()) {
            HasUnknownCall = true;
            break;
          }

    SCCNodes.insert(&F);
  }

  if (deriveAttrsInPostOrder(SCCNodes, AARGetter, HasUnknownCall))
    return PreservedAnalyses::none();

  return PreservedAnalyses::all();
}

namespace {

struct PostOrderFunctionAttrsLegacyPass : public CallGraphSCCPass {
  // Pass identification, replacement for typeid
  static char ID;

  PostOrderFunctionAttrsLegacyPass() : CallGraphSCCPass(ID) {
    initializePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    getAAResultsAnalysisUsage(AU);
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

char PostOrderFunctionAttrsLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                      "Deduce function attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(PostOrderFunctionAttrsLegacyPass, "functionattrs",
                    "Deduce function attributes", false, false)

Pass *llvm::createPostOrderFunctionAttrsLegacyPass() {
  return new PostOrderFunctionAttrsLegacyPass();
}

template <typename AARGetterT>
static bool runImpl(CallGraphSCC &SCC, AARGetterT AARGetter) {

  // Fill SCCNodes with the elements of the SCC. Used for quickly looking up
  // whether a given CallGraphNode is in this SCC. Also track whether there are
  // any external or opt-none nodes that will prevent us from optimizing any
  // part of the SCC.
  SCCNodeSet SCCNodes;
  bool ExternalNode = false;
  for (CallGraphNode *I : SCC) {
    Function *F = I->getFunction();
    if (!F || F->hasOptNone() || F->hasFnAttribute(Attribute::Naked)) {
      // External node or function we're trying not to optimize - we both avoid
      // transforming them and avoid leveraging information they provide.
      ExternalNode = true;
      continue;
    }

    SCCNodes.insert(F);
  }

  return deriveAttrsInPostOrder(SCCNodes, AARGetter, ExternalNode);
}

bool PostOrderFunctionAttrsLegacyPass::runOnSCC(CallGraphSCC &SCC) {
  if (skipSCC(SCC))
    return false;
  return runImpl(SCC, LegacyAARGetter(*this));
}

namespace {

struct ReversePostOrderFunctionAttrsLegacyPass : public ModulePass {
  // Pass identification, replacement for typeid
  static char ID;

  ReversePostOrderFunctionAttrsLegacyPass() : ModulePass(ID) {
    initializeReversePostOrderFunctionAttrsLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<CallGraphWrapperPass>();
    AU.addPreserved<CallGraphWrapperPass>();
  }
};

} // end anonymous namespace

char ReversePostOrderFunctionAttrsLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(ReversePostOrderFunctionAttrsLegacyPass,
                      "rpo-functionattrs",
                      "Deduce function attributes in RPO", false, false)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(ReversePostOrderFunctionAttrsLegacyPass,
                    "rpo-functionattrs",
                    "Deduce function attributes in RPO", false, false)

Pass *llvm::createReversePostOrderFunctionAttrsPass() {
  return new ReversePostOrderFunctionAttrsLegacyPass();
}

static bool addNoRecurseAttrsTopDown(Function &F) {
  // We check the preconditions for the function prior to calling this to avoid
  // the cost of building up a reversible post-order list. We assert them here
  // to make sure none of the invariants this relies on were violated.
  assert(!F.isDeclaration() && "Cannot deduce norecurse without a definition!");
  assert(!F.doesNotRecurse() &&
         "This function has already been deduced as norecurse!");
  assert(F.hasInternalLinkage() &&
         "Can only do top-down deduction for internal linkage functions!");

  // If F is internal and all of its uses are calls from non-recursive
  // functions, then none of its calls could in fact recurse without going
  // through a function marked norecurse, and so we can mark this function too
  // as norecurse. Note that the uses must actually be calls -- otherwise
  // a pointer to this function could be returned from a norecurse function but
  // this function could be recursively (indirectly) called. Note that this
  // also detects if F is directly recursive as F is not yet marked as
  // a norecurse function.
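  //
  // For example (illustrative IR): if @helper below is internal and its only
  // use is the call from a norecurse caller, @helper cannot recurse either:
  //   define internal void @helper() {
  //     ret void
  //   }
  //   define void @entry() norecurse {
  //     call void @helper()
  //     ret void
  //   }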
  for (auto *U : F.users()) {
    auto *I = dyn_cast<Instruction>(U);
    if (!I)
      return false;
    CallSite CS(I);
    if (!CS || !CS.getParent()->getParent()->doesNotRecurse())
      return false;
  }
  return setDoesNotRecurse(F);
}

static bool deduceFunctionAttributeInRPO(Module &M, CallGraph &CG) {
  // We only have a post-order SCC traversal (because SCCs are inherently
  // discovered in post-order), so we accumulate them in a vector and then walk
  // it in reverse. This is simpler than using the RPO iterator infrastructure
  // because we need to combine SCC detection and the PO walk of the call
  // graph. We can also cheat egregiously because we're primarily interested in
  // synthesizing norecurse and so we can only save the singular SCCs as SCCs
  // with multiple functions in them will clearly be recursive.
  SmallVector<Function *, 16> Worklist;
  for (scc_iterator<CallGraph *> I = scc_begin(&CG); !I.isAtEnd(); ++I) {
    if (I->size() != 1)
      continue;

    Function *F = I->front()->getFunction();
    if (F && !F->isDeclaration() && !F->doesNotRecurse() &&
        F->hasInternalLinkage())
      Worklist.push_back(F);
  }

  bool Changed = false;
  for (auto *F : llvm::reverse(Worklist))
    Changed |= addNoRecurseAttrsTopDown(*F);

  return Changed;
}

bool ReversePostOrderFunctionAttrsLegacyPass::runOnModule(Module &M) {
  if (skipModule(M))
    return false;

  auto &CG = getAnalysis<CallGraphWrapperPass>().getCallGraph();

  return deduceFunctionAttributeInRPO(M, CG);
}

PreservedAnalyses
ReversePostOrderFunctionAttrsPass::run(Module &M, ModuleAnalysisManager &AM) {
  auto &CG = AM.getResult<CallGraphAnalysis>(M);

  if (!deduceFunctionAttributeInRPO(M, CG))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserve<CallGraphAnalysis>();
  return PA;
}