//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CallGraph.h"
#include "llvm/Analysis/CallGraphSCCPass.h"
#include "llvm/Analysis/InlineCost.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/FileSystem.h"
#include "llvm/Support/GraphWriter.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Cloning.h"
#include "llvm/Transforms/Utils/Local.h"

#ifdef EXPENSIVE_CHECKS
#include "llvm/IR/Verifier.h"
#endif

#include <cassert>
#include <string>
using namespace llvm;

#define DEBUG_TYPE "attributor"

DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));
// These options can only be used for debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif
static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the CallGraph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));
/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}
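/// Return true if \p I is assumed to never synchronize with other threads.
/// Call sites are nosync if they carry the nosync attribute, are
/// non-convergent and do not access memory, are known nosync intrinsics, or
/// if the associated AANoSync assumes so. Other instructions are nosync if
/// they do not access memory, or are neither volatile nor non-relaxed atomic
/// accesses.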
bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    const auto &NoSyncAA = A.getAAFor<AANoSync>(
        QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
    return NoSyncAA.isAssumedNoSync();
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}
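/// Return true if \p V is assumed to denote a single, dynamically unique
/// instance, as answered by the AAInstanceInfo for \p V. Currently this is
/// only provided for analysis purposes, so \p ForAnalysisOnly must be set.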
bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA.isAssumedUniqueForAnalysis();
}
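/// Return the initial value of the memory object \p Obj, interpreted as type
/// \p Ty, if it can be determined: undef for alloca instructions, the initial
/// value of recognized allocation functions, or the initializer of suitable
/// global variables; nullptr if unknown.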
Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
                                    const TargetLibraryInfo *TLI) {
  if (isa<AllocaInst>(Obj))
    return UndefValue::get(&Ty);
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;
  if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
    return nullptr;
  if (!GV->hasInitializer())
    return UndefValue::get(&Ty);
  return dyn_cast_or_null<Constant>(getWithType(*GV->getInitializer(), Ty));
}
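/// Return true if \p V can be used in the function \p Scope, that is, \p V is
/// a constant or an instruction or argument that belongs to \p Scope.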
bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}
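/// Return true if the value of \p VAC is valid at the position of its context
/// instruction: constants always are, arguments must live in the context
/// function, and instructions must dominate the context (checked via the
/// dominator tree if available, otherwise with a local scan of the shared
/// basic block).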
bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}
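/// Try to reinterpret \p V as a value of type \p Ty without creating new
/// instructions: identity, poison/undef propagation, null values, pointer
/// casts, and constant integer/floating-point truncations are handled;
/// nullptr is returned if no such conversion exists.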
Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
    }
  }
  return nullptr;
}
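/// Combine the two simplified values \p A and \p B in the simplification
/// lattice used below: llvm::None acts as the identity ("not determined
/// yet"), nullptr is absorbing ("cannot be simplified"), undef is absorbed by
/// the other side, and two concrete values only combine if they agree after
/// conversion to \p Ty.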
Optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const Optional<Value *> &A,
                                         const Optional<Value *> &B, Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
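/// Collect the values potentially read by the load, or the potential copies
/// of the value written by the store, \p I. The underlying objects of the
/// pointer operand are determined first; for each one the interfering
/// accesses reported by AAPointerInfo are inspected. The traversal is
/// conservative: it fails (returns false) whenever an object or access is not
/// (yet) supported, and only commits to \p PotentialCopies and
/// \p PotentialValueOrigins on success.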
template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  SmallSetVector<Value *, 8> Objects;
  if (!AA::getAssumedUnderlyingObjects(A, Ptr, Objects, QueryingAA, &I,
                                       UsedAssumedInformation)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallVector<Value *> NewCopies;
  SmallVector<Instruction *> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
  LLVM_DEBUG(dbgs() << "Visit " << Objects.size() << " objects:\n");
  for (Value *Obj : Objects) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << *Obj << "\n");
    if (isa<UndefValue>(Obj))
      continue;
    if (isa<ConstantPointerNull>(Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == Obj)
        continue;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(Obj) && !isa<GlobalVariable>(Obj) &&
        !(IsLoad ? isAllocationFn(Obj, TLI) : isNoAliasCall(Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << *Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << *Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](Optional<Value *> V, bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWrite()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
                             "one, however found non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          NewCopies.push_back(Acc.getWrittenValue());
          NewCopyOrigins.push_back(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.push_back(SI->getValueOperand());
        NewCopyOrigins.push_back(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.push_back(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(*Obj),
                                         DepClassTy::NONE);
    if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess,
                                      HasBeenWrittenTo)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << *Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo) {
      Value *InitialValue = AA::getInitialValueForObj(*Obj, *I.getType(), TLI);
      if (!InitialValue)
        return false;
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.push_back(InitialValue);
      NewCopyOrigins.push_back(nullptr);
    }

    PIs.push_back(&PI);
  }
  // Only if we were successful collecting all potential copies do we record
  // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
  // given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
  PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());

  return true;
}
bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  SmallSetVector<Instruction *, 4> PotentialValueOrigins;
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}
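/// Helper to determine if \p IRP is assumed read-only, or read-none if
/// \p RequireReadNone is set. AAMemoryLocation is consulted first for
/// function and call site positions, falling back to AAMemoryBehavior.
/// \p IsKnown is set to true if the property holds without relying on assumed
/// information.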
static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto &MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA.isAssumedReadNone()) {
      IsKnown = MemLocAA.isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto &MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA.isAssumedReadNone() ||
      (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
    IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
                              : MemBehaviorAA.isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}

bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}
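/// Determine if \p ToI, or any instruction in \p ToFn if \p ToI is null, is
/// potentially reachable from \p FromI. The worklist-based search combines
/// intraprocedural AAReachability with AAFunctionReachability and, where
/// \p GoBackwardsCB allows it, steps backwards through all call sites of the
/// current function. Without a \p GoBackwardsCB the query conservatively
/// answers true.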
static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  LLVM_DEBUG(dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName()
                    << " from " << FromI << " [GBCB: " << bool(GoBackwardsCB)
                    << "]\n");

  // TODO: If we can go arbitrarily backwards we will eventually reach an
  // entry point that can reach ToI. Only once this takes a set of blocks
  // through which we cannot go, or once we track internal functions not
  // accessible from the outside, it makes sense to perform backwards analysis
  // in the absence of a GoBackwardsCB.
  if (!GoBackwardsCB) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards, abort\n");
    return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto &ReachabilityAA = A.getAAFor<AAReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    // Check if the current instruction is already known to reach the ToFn.
    const auto &FnReachabilityAA = A.getAAFor<AAFunctionReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    bool Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn);
    LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                      << " " << (Result ? "can potentially " : "cannot ")
                      << "reach @" << ToFn.getName() << " [FromFn]\n");
    if (Result)
      return true;

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    bool UsedAssumedInformation = false;
    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }

  return false;
}
bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  LLVM_DEBUG(dbgs() << "[AA] isPotentiallyReachable " << ToI << " from "
                    << FromI << " [GBCB: " << bool(GoBackwardsCB) << "]\n");
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  GoBackwardsCB);
}
/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx,
                             bool ForceReplace = false) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
      if (!ForceReplace &&
          isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  Optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && CBCandidateArg.value())
    return CBCandidateArg.value();

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  const Function *Callee = CB.getCalledFunction();
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}
ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs,
                                   bool ForceReplace) {
  Function *ScopeFn = IRP.getAnchorScope();
  IRPosition::Kind PK = IRP.getPositionKind();

  // The following generic code manifests the attributes in DeducedAttrs if
  // they improve the current IR. Due to the different annotation positions we
  // use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
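/// Build the list of positions subsuming \p IRP, i.e., positions whose
/// attributes also apply to \p IRP. The position itself always comes first;
/// argument and returned positions add their function, and call site
/// positions add the corresponding callee positions when operand bundles are
/// absent or known to be ignorable (currently only llvm.assume).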
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (const Function *Callee = CB->getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (const Function *Callee = CB->getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      const Function *Callee = CB->getCalledFunction();
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
                         bool IgnoreSubsumingPositions, Attributor *A) const {
  SmallVector<Attribute, 4> Attrs;
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
        return true;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      if (getAttrsFromAssumes(AK, Attrs, *A))
        return true;
  return false;
}
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions, Attributor *A) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
    for (Attribute::AttrKind AK : AKs)
      EquivIRP.getAttrsFromIRAttr(AK, Attrs);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  if (A)
    for (Attribute::AttrKind AK : AKs)
      getAttrsFromAssumes(AK, Attrs, *A);
}
bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
                                    SmallVectorImpl<Attribute> &Attrs) const {
  if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
    return false;

  AttributeList AttrList;
  if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
    AttrList = CB->getAttributes();
  else
    AttrList = getAssociatedFunction()->getAttributes();

  bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
  if (HasAttr)
    Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
  return HasAttr;
}
bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs,
                                     Attributor &A) const {
  assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
  Value &AssociatedValue = getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use, if not we don't need to create
  // explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  MustBeExecutedContextExplorer &Explorer =
      A.getInfoCache().getMustBeExecutedContextExplorer();
  auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
  for (const auto &It : A2K)
    if (Explorer.findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}
void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for an 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}
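/// Try to simplify the value at \p IRP to a constant: simplification
/// callbacks registered by outside AAs take precedence; otherwise the assumed
/// simplified values are queried. llvm::None signals "not known yet" while
/// nullptr signals that no constant could be derived.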
Optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or None, we
  // assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    Optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return llvm::None;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    if (Values.empty())
      return llvm::None;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}
Optional<Value *> Attributor::getAssumedSimplified(const IRPosition &IRP,
                                                   const AbstractAttribute *AA,
                                                   bool &UsedAssumedInformation,
                                                   AA::ValueScope S) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or None, we
  // assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
    return &IRP.getAssociatedValue();
  if (Values.empty())
    return llvm::None;
  if (AA)
    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
      return V;
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
  return &IRP.getAssociatedValue();
}
bool Attributor::getAssumedSimplifiedValues(
    const IRPosition &IRP, const AbstractAttribute *AA,
    SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
    bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or None, we
  // assume it's simplified.
  const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
  for (const auto &CB : SimplificationCBs) {
    Optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
    if (!CBResult.has_value())
      continue;
    Value *V = CBResult.value();
    if (!V)
      return false;
    if ((S & AA::ValueScope::Interprocedural) ||
        AA::isValidInScope(*V, IRP.getAnchorScope()))
      Values.push_back(AA::ValueAndContext{*V, nullptr});
    else
      return false;
  }
  if (!SimplificationCBs.empty())
    return true;

  // If no high-level/outside simplification occurred, use AAPotentialValues.
  const auto &PotentialValuesAA =
      getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
  if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
    return false;
  UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
  return true;
}
Optional<Value *> Attributor::translateArgumentToCallSiteContent(
    Optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
    bool &UsedAssumedInformation) {
  if (!V)
    return V;
  if (*V == nullptr || isa<Constant>(*V))
    return V;
  if (auto *Arg = dyn_cast<Argument>(*V))
    if (CB.getCalledFunction() == Arg->getParent())
      if (!Arg->hasPointeeInMemoryValueAttr())
        return getAssumedSimplified(
            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
            UsedAssumedInformation, AA::Intraprocedural);
  return nullptr;
}
Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
  // thus we cannot delete them. We can, and want to, destruct them though.
  for (auto &DepAA : DG.SyntheticRoot.Deps) {
    AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
    AA->~AbstractAttribute();
  }
}
bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  const IRPosition &IRP = AA.getIRPosition();
  if (!Functions.count(IRP.getAnchorScope()))
    return false;
  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
                       CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);

  if (auto *CB = dyn_cast<CallBase>(UserI)) {
    // For call site argument uses we can check if the argument is
    // unused/dead.
    if (CB->isArgOperand(&U)) {
      const IRPosition &CSArgPos =
          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly,
                           DepClass);
    }
  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
      const IRPosition IRP = IRPosition::inst(*SI);
      const AAIsDead &IsDeadAA =
          getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
      if (IsDeadAA.isRemovableStore()) {
        if (QueryingAA)
          recordDependence(IsDeadAA, *QueryingAA, DepClass);
        if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
          UsedAssumedInformation = true;
        return true;
      }
    }
  }

  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
}
bool Attributor::isAssumedDead(const Instruction &I,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  const IRPosition::CallBaseContext *CBCtx =
      QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;

  if (ManifestAddedBlocks.contains(I.getParent()))
    return false;

  if (!FnLivenessAA)
    FnLivenessAA =
        lookupAAFor<AAIsDead>(IRPosition::function(*I.getFunction(), CBCtx),
                              QueryingAA, DepClassTy::NONE);

  // If we have a context instruction and a liveness AA we use it.
  if (FnLivenessAA &&
      FnLivenessAA->getIRPosition().getAnchorScope() == I.getFunction() &&
      (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
                           : FnLivenessAA->isAssumedDead(&I))) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    if (!FnLivenessAA->isKnownDead(&I))
      UsedAssumedInformation = true;
    return true;
  }

  if (CheckBBLivenessOnly)
    return false;

  const IRPosition IRP = IRPosition::inst(I, CBCtx);
  const AAIsDead &IsDeadAA =
      getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
  // Don't check liveness for AAIsDead.
  if (QueryingAA == &IsDeadAA)
    return false;

  if (IsDeadAA.isAssumedDead()) {
    if (QueryingAA)
      recordDependence(IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA.isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}
bool Attributor::isAssumedDead(const IRPosition &IRP,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  Instruction *CtxI = IRP.getCtxI();
  if (CtxI &&
      isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
                    /* CheckBBLivenessOnly */ true,
                    CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
    return true;

  if (CheckBBLivenessOnly)
    return false;

  // If we haven't succeeded we query the specific liveness info for the IRP.
  const AAIsDead *IsDeadAA;
  if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(
        IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
        QueryingAA, DepClassTy::NONE);
  else
    IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
  // Don't check liveness for AAIsDead.
  if (QueryingAA == IsDeadAA)
    return false;

  if (IsDeadAA->isAssumedDead()) {
    if (QueryingAA)
      recordDependence(*IsDeadAA, *QueryingAA, DepClass);
    if (!IsDeadAA->isKnownDead())
      UsedAssumedInformation = true;
    return true;
  }

  return false;
}
bool Attributor::isAssumedDead(const BasicBlock &BB,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               DepClassTy DepClass) {
  if (!FnLivenessAA)
    FnLivenessAA = lookupAAFor<AAIsDead>(IRPosition::function(*BB.getParent()),
                                         QueryingAA, DepClassTy::NONE);
  if (FnLivenessAA->isAssumedDead(&BB)) {
    if (QueryingAA)
      recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
    return true;
  }

  return false;
}
bool Attributor::checkForAllUses(
    function_ref<bool(const Use &, bool &)> Pred,
    const AbstractAttribute &QueryingAA, const Value &V,
    bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
    bool IgnoreDroppableUses,
    function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {

  // Check the trivial case first as it catches void values.
  if (V.use_empty())
    return true;

  const IRPosition &IRP = QueryingAA.getIRPosition();
  SmallVector<const Use *, 16> Worklist;
  SmallPtrSet<const Use *, 16> Visited;

  auto AddUsers = [&](const Value &V, const Use *OldUse) {
    for (const Use &UU : V.uses()) {
      if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
        LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
                             "rejected by the equivalence call back: "
                          << *UU << "!\n");
        return false;
      }
      Worklist.push_back(&UU);
    }
    return true;
  };

  AddUsers(V, /* OldUse */ nullptr);

  LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
                    << " initial uses to check\n");

  const Function *ScopeFn = IRP.getAnchorScope();
  const auto *LivenessAA =
      ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
                                    DepClassTy::NONE)
              : nullptr;

  while (!Worklist.empty()) {
    const Use *U = Worklist.pop_back_val();
    if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
      continue;
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U->getUser()))
        dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
               << "\n";
      else
        dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
               << "\n";
    });
    bool UsedAssumedInformation = false;
    if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
                      CheckBBLivenessOnly, LivenessDepClass)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
      LLVM_DEBUG(dbgs() << "[Attributor] Droppable user, skip!\n");
      continue;
    }

    if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
      if (&SI->getOperandUse(0) == U) {
        if (!Visited.insert(U).second)
          continue;
        SmallSetVector<Value *, 4> PotentialCopies;
        if (AA::getPotentialCopiesOfStoredValue(
                *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
                /* OnlyExact */ true)) {
          LLVM_DEBUG(dbgs() << "[Attributor] Value is stored, continue with "
                            << PotentialCopies.size()
                            << " potential copies instead!\n");
          for (Value *PotentialCopy : PotentialCopies)
            if (!AddUsers(*PotentialCopy, U))
              return false;
          continue;
        }
      }
    }

    bool Follow = false;
    if (!Pred(*U, Follow))
      return false;
    if (!Follow)
      continue;

    User &Usr = *U->getUser();
    AddUsers(Usr, /* OldUse */ nullptr);

    auto *RI = dyn_cast<ReturnInst>(&Usr);
    if (!RI)
      continue;

    Function &F = *RI->getFunction();
    auto CallSitePred = [&](AbstractCallSite ACS) {
      return AddUsers(*ACS.getInstruction(), U);
    };
    if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
                              &QueryingAA, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
                           "to all call sites: "
                        << *RI << "\n");
      return false;
    }
  }

  return true;
}
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites,
                                      bool &UsedAssumedInformation) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has internal
  // linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction) {
    LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
                      << "\n");
    return false;
  }

  return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
                              &QueryingAA, UsedAssumedInformation);
}
bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
                                      const Function &Fn,
                                      bool RequireAllCallSites,
                                      const AbstractAttribute *QueryingAA,
                                      bool &UsedAssumedInformation) {
  if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << Fn.getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
  for (unsigned u = 0; u < Uses.size(); ++u) {
    const Use &U = *Uses[u];
    LLVM_DEBUG({
      if (auto *Fn = dyn_cast<Function>(U))
        dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
               << *U.getUser() << "\n";
      else
        dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
               << "\n";
    });
    if (isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
                      /* CheckBBLivenessOnly */ true)) {
      LLVM_DEBUG(dbgs() << "[Attributor] Dead use, skip!\n");
      continue;
    }
    if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
      if (CE->isCast() && CE->getType()->isPointerTy()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Use, is constant cast expression, add "
                   << CE->getNumUses() << " uses of that expression instead!\n");
        for (const Use &CEU : CE->uses())
          Uses.push_back(&CEU);
        continue;
      }
    }

    AbstractCallSite ACS(&U);
    if (!ACS) {
      LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
                        << " has non call site use " << *U.get() << " in "
                        << *U.getUser() << "\n");
      // BlockAddress users are allowed.
      if (isa<BlockAddress>(U.getUser()))
        continue;
      return false;
    }

    const Use *EffectiveUse =
        ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
    if (!ACS.isCallee(EffectiveUse)) {
      if (!RequireAllCallSites) {
        LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                          << " is not a call of " << Fn.getName()
                          << ", skip use\n");
        continue;
      }
      LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
                        << " is an invalid use of " << Fn.getName() << "\n");
      return false;
    }
    // Make sure the arguments that can be matched between the call site and
    // the callee agree on their type. It is unlikely they do not and it
    // doesn't make sense for all attributes to know/care about this.
    assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
    unsigned MinArgsParams =
        std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
    for (unsigned u = 0; u < MinArgsParams; ++u) {
      Value *CSArgOp = ACS.getCallArgOperand(u);
      if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
        LLVM_DEBUG(
            dbgs() << "[Attributor] Call site / callee argument type mismatch ["
                   << u << "@" << Fn.getName() << ": "
                   << *Fn.getArg(u)->getType() << " vs. "
                   << *ACS.getCallArgOperand(u)->getType() << "\n");
        return false;
      }
    }

    if (Pred(ACS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *ACS.getInstruction() << "\n");
    return false;
  }

  return true;
}
bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
  // TODO: Maintain a cache of Values that are on the pathway from an Argument
  // to an Instruction that would affect the liveness/return state etc.
  return EnableCallSiteSpecific;
}
1559 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
1560 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
1561 const AbstractAttribute &QueryingAA) {
1563 const IRPosition &IRP = QueryingAA.getIRPosition();
1564 // Since we need to provide return instructions we have to have an exact
1565 // definition.
1566 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1567 if (!AssociatedFunction)
1568 return false;
1570 // If this is a call site query we use the call site specific return values
1571 // and liveness information.
1572 // TODO: use the function scope once we have call site AAReturnedValues.
1573 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1574 const auto &AARetVal =
1575 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1576 if (!AARetVal.getState().isValidState())
1577 return false;
1579 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
1582 bool Attributor::checkForAllReturnedValues(
1583 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
1585 const IRPosition &IRP = QueryingAA.getIRPosition();
1586 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1587 if (!AssociatedFunction)
1588 return false;
1590 // TODO: use the function scope once we have call site AAReturnedValues.
1591 const IRPosition &QueryIRP = IRPosition::function(
1592 *AssociatedFunction, QueryingAA.getCallBaseContext());
1593 const auto &AARetVal =
1594 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1595 if (!AARetVal.getState().isValidState())
1596 return false;
1598 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
1599 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
1600 return Pred(RV);
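// Editorial example (sketch): querying all returned values of the associated
// function from inside an abstract attribute's update; the lambda and the
// surrounding context are illustrative assumptions.
//
//   auto ReturnedValueIsConstant = [](Value &RV) { return isa<Constant>(RV); };
//   if (!A.checkForAllReturnedValues(ReturnedValueIsConstant, *this))
//     return indicatePessimisticFixpoint();
//
// Both variants above conservatively fail unless the queried AAReturnedValues
// state is valid, so callers must be prepared for a negative answer.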
1604 static bool checkForAllInstructionsImpl(
1605 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
1606 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1607 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1608 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1609 bool CheckPotentiallyDead = false) {
1610 for (unsigned Opcode : Opcodes) {
1611 // Check if we have instructions with this opcode at all first.
1612 auto *Insts = OpcodeInstMap.lookup(Opcode);
1613 if (!Insts)
1614 continue;
1616 for (Instruction *I : *Insts) {
1617 // Skip dead instructions.
1618 if (A && !CheckPotentiallyDead &&
1619 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1620 UsedAssumedInformation, CheckBBLivenessOnly)) {
1621 LLVM_DEBUG(dbgs() << "[Attributor] Instruction " << *I
1622 << " is potentially dead, skip!\n";);
1623 continue;
1626 if (!Pred(*I))
1627 return false;
1630 return true;
1633 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1634 const Function *Fn,
1635 const AbstractAttribute &QueryingAA,
1636 const ArrayRef<unsigned> &Opcodes,
1637 bool &UsedAssumedInformation,
1638 bool CheckBBLivenessOnly,
1639 bool CheckPotentiallyDead) {
1640 // Since we need to provide instructions we have to have an exact definition.
1641 if (!Fn || Fn->isDeclaration())
1642 return false;
1644 // TODO: use the function scope once we have call site AAReturnedValues.
1645 const IRPosition &QueryIRP = IRPosition::function(*Fn);
1646 const auto *LivenessAA =
1647 (CheckBBLivenessOnly || CheckPotentiallyDead)
1648 ? nullptr
1649 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1651 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1652 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1653 LivenessAA, Opcodes, UsedAssumedInformation,
1654 CheckBBLivenessOnly, CheckPotentiallyDead))
1655 return false;
1657 return true;
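// Editorial example (sketch): visiting all call instructions of the function
// associated with an abstract attribute, mirroring the opcode-filtered
// queries used during seeding below; the predicate is an illustrative
// assumption.
//
//   bool UsedAssumedInformation = false;
//   bool Success = A.checkForAllInstructions(
//       [](Instruction &I) { return !cast<CallBase>(I).isMustTailCall(); },
//       *this, {(unsigned)Instruction::Call}, UsedAssumedInformation);
//
// Unless CheckPotentiallyDead is passed, instructions that are assumed dead
// are skipped, so the predicate only sees (assumed) live instructions.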
1660 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1661 const AbstractAttribute &QueryingAA,
1662 const ArrayRef<unsigned> &Opcodes,
1663 bool &UsedAssumedInformation,
1664 bool CheckBBLivenessOnly,
1665 bool CheckPotentiallyDead) {
1666 const IRPosition &IRP = QueryingAA.getIRPosition();
1667 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1668 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
1669 UsedAssumedInformation, CheckBBLivenessOnly,
1670 CheckPotentiallyDead);
1673 bool Attributor::checkForAllReadWriteInstructions(
1674 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
1675 bool &UsedAssumedInformation) {
1677 const Function *AssociatedFunction =
1678 QueryingAA.getIRPosition().getAssociatedFunction();
1679 if (!AssociatedFunction)
1680 return false;
1682 // TODO: use the function scope once we have call site AAReturnedValues.
1683 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1684 const auto &LivenessAA =
1685 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
1687 for (Instruction *I :
1688 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
1689 // Skip dead instructions.
1690 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
1691 UsedAssumedInformation))
1692 continue;
1694 if (!Pred(*I))
1695 return false;
1698 return true;
1701 void Attributor::runTillFixpoint() {
1702 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
1703 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
1704 << DG.SyntheticRoot.Deps.size()
1705 << " abstract attributes.\n");
1707 // Now that all abstract attributes are collected and initialized we start
1708 // the abstract analysis.
1710 unsigned IterationCounter = 1;
1711 unsigned MaxIterations =
1712 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
1714 SmallVector<AbstractAttribute *, 32> ChangedAAs;
1715 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1716 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1718 do {
1719 // Remember the size to determine new attributes.
1720 size_t NumAAs = DG.SyntheticRoot.Deps.size();
1721 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1722 << ", Worklist size: " << Worklist.size() << "\n");
1724 // For invalid AAs we can fix dependent AAs that have a required dependence,
1725 // thereby folding long dependence chains in a single step without the need
1726 // to run updates.
1727 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
1728 AbstractAttribute *InvalidAA = InvalidAAs[u];
1730 // Check the dependences to fast track invalidation.
1731 LLVM_DEBUG(dbgs() << "[Attributor] InvalidAA: " << *InvalidAA << " has "
1732 << InvalidAA->Deps.size()
1733 << " required & optional dependences\n");
1734 while (!InvalidAA->Deps.empty()) {
1735 const auto &Dep = InvalidAA->Deps.back();
1736 InvalidAA->Deps.pop_back();
1737 AbstractAttribute *DepAA = cast<AbstractAttribute>(Dep.getPointer());
1738 if (Dep.getInt() == unsigned(DepClassTy::OPTIONAL)) {
1739 LLVM_DEBUG(dbgs() << " - recompute: " << *DepAA);
1740 Worklist.insert(DepAA);
1741 continue;
1743 LLVM_DEBUG(dbgs() << " - invalidate: " << *DepAA);
1744 DepAA->getState().indicatePessimisticFixpoint();
1745 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
1746 if (!DepAA->getState().isValidState())
1747 InvalidAAs.insert(DepAA);
1748 else
1749 ChangedAAs.push_back(DepAA);
1753 // Add all abstract attributes that are potentially dependent on one that
1754 // changed to the work list.
1755 for (AbstractAttribute *ChangedAA : ChangedAAs)
1756 while (!ChangedAA->Deps.empty()) {
1757 Worklist.insert(
1758 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
1759 ChangedAA->Deps.pop_back();
1762 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
1763 << ", Worklist+Dependent size: " << Worklist.size()
1764 << "\n");
1766 // Reset the changed and invalid set.
1767 ChangedAAs.clear();
1768 InvalidAAs.clear();
1770     // Update all abstract attributes in the work list and record the ones
1771     // that changed.
1772 for (AbstractAttribute *AA : Worklist) {
1773 const auto &AAState = AA->getState();
1774 if (!AAState.isAtFixpoint())
1775 if (updateAA(*AA) == ChangeStatus::CHANGED)
1776 ChangedAAs.push_back(AA);
1778       // Use the InvalidAAs vector to propagate invalid states quickly and
1779       // transitively, without requiring updates.
1780 if (!AAState.isValidState())
1781 InvalidAAs.insert(AA);
1784 // Add attributes to the changed set if they have been created in the last
1785 // iteration.
1786 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
1787 DG.SyntheticRoot.end());
1789 // Reset the work list and repopulate with the changed abstract attributes.
1790 // Note that dependent ones are added above.
1791 Worklist.clear();
1792 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
1793 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
1794 QueryAAsAwaitingUpdate.end());
1795 QueryAAsAwaitingUpdate.clear();
1797 } while (!Worklist.empty() &&
1798 (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
1800 if (IterationCounter > MaxIterations && !Functions.empty()) {
1801 auto Remark = [&](OptimizationRemarkMissed ORM) {
1802 return ORM << "Attributor did not reach a fixpoint after "
1803 << ore::NV("Iterations", MaxIterations) << " iterations.";
1805 Function *F = Functions.front();
1806 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
1809 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
1810 << IterationCounter << "/" << MaxIterations
1811 << " iterations\n");
1813   // Reset abstract attributes not settled in a sound fixpoint by now. This
1814 // happens when we stopped the fixpoint iteration early. Note that only the
1815 // ones marked as "changed" *and* the ones transitively depending on them
1816 // need to be reverted to a pessimistic state. Others might not be in a
1817 // fixpoint state but we can use the optimistic results for them anyway.
1818 SmallPtrSet<AbstractAttribute *, 32> Visited;
1819 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
1820 AbstractAttribute *ChangedAA = ChangedAAs[u];
1821 if (!Visited.insert(ChangedAA).second)
1822 continue;
1824 AbstractState &State = ChangedAA->getState();
1825 if (!State.isAtFixpoint()) {
1826 State.indicatePessimisticFixpoint();
1828 NumAttributesTimedOut++;
1831 while (!ChangedAA->Deps.empty()) {
1832 ChangedAAs.push_back(
1833 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
1834 ChangedAA->Deps.pop_back();
1838 LLVM_DEBUG({
1839 if (!Visited.empty())
1840 dbgs() << "\n[Attributor] Finalized " << Visited.size()
1841 << " abstract attributes.\n";
1844 if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
1845 errs() << "\n[Attributor] Fixpoint iteration done after: "
1846 << IterationCounter << "/" << MaxIterations << " iterations\n";
1847 llvm_unreachable("The fixpoint was not reached with exactly the number of "
1848 "specified iterations!");
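// Editorial example (sketch): bounding the fixpoint iteration per Attributor
// instance. The limit comes from the configuration if set, otherwise from the
// SetFixpointIterations command line default; the value below is purely
// illustrative.
//
//   AttributorConfig AC(CGUpdater);
//   AC.MaxFixpointIterations = 32;
//   Attributor A(Functions, InfoCache, AC);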
1852 void Attributor::registerForUpdate(AbstractAttribute &AA) {
1853 assert(AA.isQueryAA() &&
1854 "Non-query AAs should not be required to register for updates!");
1855 QueryAAsAwaitingUpdate.insert(&AA);
1858 ChangeStatus Attributor::manifestAttributes() {
1859 TimeTraceScope TimeScope("Attributor::manifestAttributes");
1860 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
1862 unsigned NumManifested = 0;
1863 unsigned NumAtFixpoint = 0;
1864 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
1865 for (auto &DepAA : DG.SyntheticRoot.Deps) {
1866 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
1867 AbstractState &State = AA->getState();
1869     // If a fixpoint was not reached already, we can now take the optimistic
1870     // state. This is correct because we enforced a pessimistic one on abstract
1871     // attributes that were transitively dependent on a changed one already
1872     // above.
1873 if (!State.isAtFixpoint())
1874 State.indicateOptimisticFixpoint();
1876     // We must not manifest attributes that use CallBase info.
1877 if (AA->hasCallBaseContext())
1878 continue;
1879 // If the state is invalid, we do not try to manifest it.
1880 if (!State.isValidState())
1881 continue;
1883 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
1884 continue;
1886 // Skip dead code.
1887 bool UsedAssumedInformation = false;
1888 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
1889 /* CheckBBLivenessOnly */ true))
1890 continue;
1891     // Consult the manifest debug counter, which allows skipping the
1892     // manifestation of individual AAs.
1893 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
1894 continue;
1895 // Manifest the state and record if we changed the IR.
1896 ChangeStatus LocalChange = AA->manifest(*this);
1897 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
1898 AA->trackStatistics();
1899 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
1900 << "\n");
1902 ManifestChange = ManifestChange | LocalChange;
1904 NumAtFixpoint++;
1905 NumManifested += (LocalChange == ChangeStatus::CHANGED);
1908 (void)NumManifested;
1909 (void)NumAtFixpoint;
1910 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
1911                     << " abstract attributes while " << NumAtFixpoint
1912 << " were in a valid fixpoint state\n");
1914 NumAttributesManifested += NumManifested;
1915 NumAttributesValidFixpoint += NumAtFixpoint;
1917 (void)NumFinalAAs;
1918 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
1919 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size(); ++u)
1920 errs() << "Unexpected abstract attribute: "
1921 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
1922 << " :: "
1923 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
1924 ->getIRPosition()
1925 .getAssociatedValue()
1926 << "\n";
1927 llvm_unreachable("Expected the final number of abstract attributes to "
1928 "remain unchanged!");
1930 return ManifestChange;
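// Editorial example (sketch): the ManifestDBGCounter consulted above allows
// bisecting miscompiles by manifesting only a subset of AAs. Assuming the
// usual DebugCounter command line syntax and the counter name registered for
// ManifestDBGCounter, an invocation could look like
//
//   opt -passes=attributor -debug-counter=attributor-manifest-skip=10 in.ll
//
// which would skip the first ten manifestations (debug counters are only
// available in builds that enable them).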
1933 void Attributor::identifyDeadInternalFunctions() {
1934 // Early exit if we don't intend to delete functions.
1935 if (!Configuration.DeleteFns)
1936 return;
1938 // Identify dead internal functions and delete them. This happens outside
1939 // the other fixpoint analysis as we might treat potentially dead functions
1940 // as live to lower the number of iterations. If they happen to be dead, the
1941 // below fixpoint loop will identify and eliminate them.
1942 SmallVector<Function *, 8> InternalFns;
1943 for (Function *F : Functions)
1944 if (F->hasLocalLinkage())
1945 InternalFns.push_back(F);
1947 SmallPtrSet<Function *, 8> LiveInternalFns;
1948 bool FoundLiveInternal = true;
1949 while (FoundLiveInternal) {
1950 FoundLiveInternal = false;
1951 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
1952 Function *F = InternalFns[u];
1953 if (!F)
1954 continue;
1956 bool UsedAssumedInformation = false;
1957 if (checkForAllCallSites(
1958 [&](AbstractCallSite ACS) {
1959 Function *Callee = ACS.getInstruction()->getFunction();
1960 return ToBeDeletedFunctions.count(Callee) ||
1961 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
1962 !LiveInternalFns.count(Callee));
1964 *F, true, nullptr, UsedAssumedInformation)) {
1965 continue;
1968 LiveInternalFns.insert(F);
1969 InternalFns[u] = nullptr;
1970 FoundLiveInternal = true;
1974 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
1975 if (Function *F = InternalFns[u])
1976 ToBeDeletedFunctions.insert(F);
1979 ChangeStatus Attributor::cleanupIR() {
1980 TimeTraceScope TimeScope("Attributor::cleanupIR");
1981   // Delete stuff at the end to avoid invalid references and to keep a nice order.
1982 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
1983 << ToBeDeletedFunctions.size() << " functions and "
1984 << ToBeDeletedBlocks.size() << " blocks and "
1985 << ToBeDeletedInsts.size() << " instructions and "
1986 << ToBeChangedValues.size() << " values and "
1987 << ToBeChangedUses.size() << " uses. To insert "
1988 << ToBeChangedToUnreachableInsts.size()
1989 << " unreachables.\n"
1990 << "Preserve manifest added " << ManifestAddedBlocks.size()
1991 << " blocks\n");
1993 SmallVector<WeakTrackingVH, 32> DeadInsts;
1994 SmallVector<Instruction *, 32> TerminatorsToFold;
1996 auto ReplaceUse = [&](Use *U, Value *NewV) {
1997 Value *OldV = U->get();
1999 // If we plan to replace NewV we need to update it at this point.
2000 do {
2001 const auto &Entry = ToBeChangedValues.lookup(NewV);
2002 if (!Entry.first)
2003 break;
2004 NewV = Entry.first;
2005 } while (true);
2007 Instruction *I = dyn_cast<Instruction>(U->getUser());
2008 assert((!I || isRunOn(*I->getFunction())) &&
2009 "Cannot replace an instruction outside the current SCC!");
2011 // Do not replace uses in returns if the value is a must-tail call we will
2012 // not delete.
2013 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2014 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2015 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2016 return;
2017 // If we rewrite a return and the new value is not an argument, strip the
2018 // `returned` attribute as it is wrong now.
2019 if (!isa<Argument>(NewV))
2020 for (auto &Arg : RI->getFunction()->args())
2021 Arg.removeAttr(Attribute::Returned);
2024 // Do not perform call graph altering changes outside the SCC.
2025 if (auto *CB = dyn_cast_or_null<CallBase>(I))
2026 if (CB->isCallee(U))
2027 return;
2029 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2030 << " instead of " << *OldV << "\n");
2031 U->set(NewV);
2033 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2034 CGModifiedFunctions.insert(I->getFunction());
2035 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2036 isInstructionTriviallyDead(I))
2037 DeadInsts.push_back(I);
2039 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2040 auto *CB = cast<CallBase>(U->getUser());
2041 if (CB->isArgOperand(U)) {
2042 unsigned Idx = CB->getArgOperandNo(U);
2043 CB->removeParamAttr(Idx, Attribute::NoUndef);
2044 Function *Fn = CB->getCalledFunction();
2045 if (Fn && Fn->arg_size() > Idx)
2046 Fn->removeParamAttr(Idx, Attribute::NoUndef);
2049 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2050 Instruction *UserI = cast<Instruction>(U->getUser());
2051 if (isa<UndefValue>(NewV)) {
2052 ToBeChangedToUnreachableInsts.insert(UserI);
2053 } else {
2054 TerminatorsToFold.push_back(UserI);
2059 for (auto &It : ToBeChangedUses) {
2060 Use *U = It.first;
2061 Value *NewV = It.second;
2062 ReplaceUse(U, NewV);
2065 SmallVector<Use *, 4> Uses;
2066 for (auto &It : ToBeChangedValues) {
2067 Value *OldV = It.first;
2068 auto &Entry = It.second;
2069 Value *NewV = Entry.first;
2070 Uses.clear();
2071 for (auto &U : OldV->uses())
2072 if (Entry.second || !U.getUser()->isDroppable())
2073 Uses.push_back(&U);
2074 for (Use *U : Uses) {
2075 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2076 if (!isRunOn(*I->getFunction()))
2077 continue;
2078 ReplaceUse(U, NewV);
2082 for (const auto &V : InvokeWithDeadSuccessor)
2083 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2084 assert(isRunOn(*II->getFunction()) &&
2085 "Cannot replace an invoke outside the current SCC!");
2086 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2087 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2088 bool Invoke2CallAllowed =
2089 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2090 assert((UnwindBBIsDead || NormalBBIsDead) &&
2091 "Invoke does not have dead successors!");
2092 BasicBlock *BB = II->getParent();
2093 BasicBlock *NormalDestBB = II->getNormalDest();
2094 if (UnwindBBIsDead) {
2095 Instruction *NormalNextIP = &NormalDestBB->front();
2096 if (Invoke2CallAllowed) {
2097 changeToCall(II);
2098 NormalNextIP = BB->getTerminator();
2100 if (NormalBBIsDead)
2101 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2102 } else {
2103 assert(NormalBBIsDead && "Broken invariant!");
2104 if (!NormalDestBB->getUniquePredecessor())
2105 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2106 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2109 for (Instruction *I : TerminatorsToFold) {
2110 assert(isRunOn(*I->getFunction()) &&
2111 "Cannot replace a terminator outside the current SCC!");
2112 CGModifiedFunctions.insert(I->getFunction());
2113 ConstantFoldTerminator(I->getParent());
2115 for (const auto &V : ToBeChangedToUnreachableInsts)
2116 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2117 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2118 << "\n");
2119 assert(isRunOn(*I->getFunction()) &&
2120 "Cannot replace an instruction outside the current SCC!");
2121 CGModifiedFunctions.insert(I->getFunction());
2122 changeToUnreachable(I);
2125 for (const auto &V : ToBeDeletedInsts) {
2126 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2127 if (auto *CB = dyn_cast<CallBase>(I)) {
2128 assert(isRunOn(*I->getFunction()) &&
2129 "Cannot delete an instruction outside the current SCC!");
2130 if (!isa<IntrinsicInst>(CB))
2131 Configuration.CGUpdater.removeCallSite(*CB);
2133 I->dropDroppableUses();
2134 CGModifiedFunctions.insert(I->getFunction());
2135 if (!I->getType()->isVoidTy())
2136 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2137 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2138 DeadInsts.push_back(I);
2139 else
2140 I->eraseFromParent();
2144 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2146 LLVM_DEBUG({
2147 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2148 for (auto &I : DeadInsts)
2149 if (I)
2150 dbgs() << " - " << *I << "\n";
2153 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2155 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2156 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2157 ToBeDeletedBBs.reserve(NumDeadBlocks);
2158 for (BasicBlock *BB : ToBeDeletedBlocks) {
2159 assert(isRunOn(*BB->getParent()) &&
2160 "Cannot delete a block outside the current SCC!");
2161 CGModifiedFunctions.insert(BB->getParent());
2162 // Do not delete BBs added during manifests of AAs.
2163 if (ManifestAddedBlocks.contains(BB))
2164 continue;
2165 ToBeDeletedBBs.push_back(BB);
2167   // We do not actually delete the blocks but squash them into a single
2168   // unreachable instruction; untangling branches that jump here is something
2169   // we need to do in a more generic way.
2170 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2173 identifyDeadInternalFunctions();
2175 // Rewrite the functions as requested during manifest.
2176 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2178 for (Function *Fn : CGModifiedFunctions)
2179 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2180 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2182 for (Function *Fn : ToBeDeletedFunctions) {
2183 if (!Functions.count(Fn))
2184 continue;
2185 Configuration.CGUpdater.removeFunction(*Fn);
2188 if (!ToBeChangedUses.empty())
2189 ManifestChange = ChangeStatus::CHANGED;
2191 if (!ToBeChangedToUnreachableInsts.empty())
2192 ManifestChange = ChangeStatus::CHANGED;
2194 if (!ToBeDeletedFunctions.empty())
2195 ManifestChange = ChangeStatus::CHANGED;
2197 if (!ToBeDeletedBlocks.empty())
2198 ManifestChange = ChangeStatus::CHANGED;
2200 if (!ToBeDeletedInsts.empty())
2201 ManifestChange = ChangeStatus::CHANGED;
2203 if (!InvokeWithDeadSuccessor.empty())
2204 ManifestChange = ChangeStatus::CHANGED;
2206 if (!DeadInsts.empty())
2207 ManifestChange = ChangeStatus::CHANGED;
2209 NumFnDeleted += ToBeDeletedFunctions.size();
2211 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2212 << " functions after manifest.\n");
2214 #ifdef EXPENSIVE_CHECKS
2215 for (Function *F : Functions) {
2216 if (ToBeDeletedFunctions.count(F))
2217 continue;
2218 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2220 #endif
2222 return ManifestChange;
2225 ChangeStatus Attributor::run() {
2226 TimeTraceScope TimeScope("Attributor::run");
2227 AttributorCallGraph ACallGraph(*this);
2229 if (PrintCallGraph)
2230 ACallGraph.populateAll();
2232 Phase = AttributorPhase::UPDATE;
2233 runTillFixpoint();
2235   // Dump the dependence graph on demand.
2236 if (DumpDepGraph)
2237 DG.dumpGraph();
2239 if (ViewDepGraph)
2240 DG.viewGraph();
2242 if (PrintDependencies)
2243 DG.print();
2245 Phase = AttributorPhase::MANIFEST;
2246 ChangeStatus ManifestChange = manifestAttributes();
2248 Phase = AttributorPhase::CLEANUP;
2249 ChangeStatus CleanupChange = cleanupIR();
2251 if (PrintCallGraph)
2252 ACallGraph.print();
2254 return ManifestChange | CleanupChange;
2257 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2258 TimeTraceScope TimeScope(
2259 AA.getName() + std::to_string(AA.getIRPosition().getPositionKind()) +
2260 "::updateAA");
2261 assert(Phase == AttributorPhase::UPDATE &&
2262 "We can update AA only in the update stage!");
2264 // Use a new dependence vector for this update.
2265 DependenceVector DV;
2266 DependenceStack.push_back(&DV);
2268 auto &AAState = AA.getState();
2269 ChangeStatus CS = ChangeStatus::UNCHANGED;
2270 bool UsedAssumedInformation = false;
2271 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2272 /* CheckBBLivenessOnly */ true))
2273 CS = AA.update(*this);
2275 if (!AA.isQueryAA() && DV.empty()) {
2276 // If the attribute did not query any non-fix information, the state
2277 // will not change and we can indicate that right away.
2278 AAState.indicateOptimisticFixpoint();
2281 if (!AAState.isAtFixpoint())
2282 rememberDependences();
2284   // Verify the stack was used properly, that is, we pop the dependence vector
2285   // we put there earlier.
2286 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2287 (void)PoppedDV;
2288 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2290 return CS;
2293 void Attributor::createShallowWrapper(Function &F) {
2294 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2296 Module &M = *F.getParent();
2297 LLVMContext &Ctx = M.getContext();
2298 FunctionType *FnTy = F.getFunctionType();
2300 Function *Wrapper =
2301 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2302   F.setName(""); // Make the wrapped function anonymous.
2303 M.getFunctionList().insert(F.getIterator(), Wrapper);
2305 F.setLinkage(GlobalValue::InternalLinkage);
2307 F.replaceAllUsesWith(Wrapper);
2308 assert(F.use_empty() && "Uses remained after wrapper was created!");
2310 // Move the COMDAT section to the wrapper.
2311 // TODO: Check if we need to keep it for F as well.
2312 Wrapper->setComdat(F.getComdat());
2313 F.setComdat(nullptr);
2315 // Copy all metadata and attributes but keep them on F as well.
2316 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2317 F.getAllMetadata(MDs);
2318 for (auto MDIt : MDs)
2319 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2320 Wrapper->setAttributes(F.getAttributes());
2322 // Create the call in the wrapper.
2323 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2325 SmallVector<Value *, 8> Args;
2326 Argument *FArgIt = F.arg_begin();
2327 for (Argument &Arg : Wrapper->args()) {
2328 Args.push_back(&Arg);
2329 Arg.setName((FArgIt++)->getName());
2332 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2333 CI->setTailCall(true);
2334 CI->addFnAttr(Attribute::NoInline);
2335 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2337 NumFnShallowWrappersCreated++;
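// Editorial example (sketch): for a function `i32 @foo(i32)` the code above
// produces IR roughly of the following shape; the original function keeps
// its body but becomes anonymous and internal:
//
//   define i32 @foo(i32 %x) {            ; shallow wrapper, original name
//   entry:
//     %call = tail call i32 @0(i32 %x)   ; call is marked noinline
//     ret i32 %call
//   }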
2340 bool Attributor::isInternalizable(Function &F) {
2341 if (F.isDeclaration() || F.hasLocalLinkage() ||
2342 GlobalValue::isInterposableLinkage(F.getLinkage()))
2343 return false;
2344 return true;
2347 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2348 if (!AllowDeepWrapper && !Force)
2349 return nullptr;
2350 if (!isInternalizable(F))
2351 return nullptr;
2353 SmallPtrSet<Function *, 2> FnSet = {&F};
2354 DenseMap<Function *, Function *> InternalizedFns;
2355 internalizeFunctions(FnSet, InternalizedFns);
2357 return InternalizedFns[&F];
2360 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2361 DenseMap<Function *, Function *> &FnMap) {
2362 for (Function *F : FnSet)
2363 if (!Attributor::isInternalizable(*F))
2364 return false;
2366 FnMap.clear();
2367 // Generate the internalized version of each function.
2368 for (Function *F : FnSet) {
2369 Module &M = *F->getParent();
2370 FunctionType *FnTy = F->getFunctionType();
2372 // Create a copy of the current function
2373 Function *Copied =
2374 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2375 F->getName() + ".internalized");
2376 ValueToValueMapTy VMap;
2377 auto *NewFArgIt = Copied->arg_begin();
2378 for (auto &Arg : F->args()) {
2379 auto ArgName = Arg.getName();
2380 NewFArgIt->setName(ArgName);
2381 VMap[&Arg] = &(*NewFArgIt++);
2383 SmallVector<ReturnInst *, 8> Returns;
2385 // Copy the body of the original function to the new one
2386 CloneFunctionInto(Copied, F, VMap,
2387 CloneFunctionChangeType::LocalChangesOnly, Returns);
2389     // Set the linkage and visibility late as CloneFunctionInto has some
2390     // implicit requirements.
2391 Copied->setVisibility(GlobalValue::DefaultVisibility);
2392 Copied->setLinkage(GlobalValue::PrivateLinkage);
2394 // Copy metadata
2395 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2396 F->getAllMetadata(MDs);
2397 for (auto MDIt : MDs)
2398 if (!Copied->hasMetadata())
2399 Copied->addMetadata(MDIt.first, *MDIt.second);
2401 M.getFunctionList().insert(F->getIterator(), Copied);
2402 Copied->setDSOLocal(true);
2403 FnMap[F] = Copied;
2406 // Replace all uses of the old function with the new internalized function
2407 // unless the caller is a function that was just internalized.
2408 for (Function *F : FnSet) {
2409 auto &InternalizedFn = FnMap[F];
2410 auto IsNotInternalized = [&](Use &U) -> bool {
2411 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2412 return !FnMap.lookup(CB->getCaller());
2413 return false;
2415 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2418 return true;
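// Editorial example (sketch): forcing the internalization of one function.
// The copy receives private linkage and a ".internalized" suffix, and uses
// outside the internalized set are redirected to it; `A` and `F` are
// illustrative.
//
//   Function *Copy = A.internalizeFunction(F, /* Force */ true);
//   if (Copy) { /* deduction may treat the copy as an exact definition */ }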
2421 bool Attributor::isValidFunctionSignatureRewrite(
2422 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2424 if (!Configuration.RewriteSignatures)
2425 return false;
2427 Function *Fn = Arg.getParent();
2428 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2429 // Forbid the call site to cast the function return type. If we need to
2430 // rewrite these functions we need to re-create a cast for the new call site
2431 // (if the old had uses).
2432 if (!ACS.getCalledFunction() ||
2433 ACS.getInstruction()->getType() !=
2434 ACS.getCalledFunction()->getReturnType())
2435 return false;
2436 if (ACS.getCalledOperand()->getType() != Fn->getType())
2437 return false;
2438 // Forbid must-tail calls for now.
2439 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2442 // Avoid var-arg functions for now.
2443 if (Fn->isVarArg()) {
2444 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2445 return false;
2448 // Avoid functions with complicated argument passing semantics.
2449 AttributeList FnAttributeList = Fn->getAttributes();
2450 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2451 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2452 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2453 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2454 LLVM_DEBUG(
2455 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2456 return false;
2459 // Avoid callbacks for now.
2460 bool UsedAssumedInformation = false;
2461 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2462 UsedAssumedInformation)) {
2463 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2464 return false;
2467 auto InstPred = [](Instruction &I) {
2468 if (auto *CI = dyn_cast<CallInst>(&I))
2469 return !CI->isMustTailCall();
2470 return true;
2473 // Forbid must-tail calls for now.
2474 // TODO:
2475 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2476 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2477 nullptr, {Instruction::Call},
2478 UsedAssumedInformation)) {
2479 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2480 return false;
2483 return true;
2486 bool Attributor::registerFunctionSignatureRewrite(
2487 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2488 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2489 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2490 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2491 << Arg.getParent()->getName() << " with "
2492 << ReplacementTypes.size() << " replacements\n");
2493 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2494 "Cannot register an invalid rewrite");
2496 Function *Fn = Arg.getParent();
2497 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2498 ArgumentReplacementMap[Fn];
2499 if (ARIs.empty())
2500 ARIs.resize(Fn->arg_size());
2502   // If we already have a replacement with at most as many new arguments,
2503   // ignore this request.
2504 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2505 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2506 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2507 return false;
2510 // If we have a replacement already but we like the new one better, delete
2511 // the old.
2512 ARI.reset();
2514 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2515 << Arg.getParent()->getName() << " with "
2516 << ReplacementTypes.size() << " replacements\n");
2518 // Remember the replacement.
2519 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2520 std::move(CalleeRepairCB),
2521 std::move(ACSRepairCB)));
2523 return true;
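// Editorial example (sketch): registering a rewrite that drops a dead
// argument. The callback shapes mirror how they are invoked in
// rewriteFunctionSignatures below; the bodies are illustrative assumptions.
//
//   ArgumentReplacementInfo::CalleeRepairCBTy CalleeRepairCB =
//       [](const ArgumentReplacementInfo &ARI, Function &NewFn,
//          Function::arg_iterator ArgIt) { /* nothing to repair */ };
//   ArgumentReplacementInfo::ACSRepairCBTy ACSRepairCB =
//       [](const ArgumentReplacementInfo &ARI, AbstractCallSite ACS,
//          SmallVectorImpl<Value *> &NewArgOps) { /* operand is dropped */ };
//   A.registerFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {},
//                                      std::move(CalleeRepairCB),
//                                      std::move(ACSRepairCB));
//
// An empty replacement type list removes the argument; its remaining uses in
// the old body are replaced by poison, as implemented below.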
2526 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2527 bool Result = true;
2528 #ifndef NDEBUG
2529   if (!SeedAllowList.empty())
2530     Result = llvm::is_contained(SeedAllowList, AA.getName());
2531   Function *Fn = AA.getAnchorScope();
2532   if (!FunctionSeedAllowList.empty() && Fn)
2533 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2534 #endif
2535 return Result;
2538 ChangeStatus Attributor::rewriteFunctionSignatures(
2539 SmallSetVector<Function *, 8> &ModifiedFns) {
2540 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2542 for (auto &It : ArgumentReplacementMap) {
2543 Function *OldFn = It.getFirst();
2545 // Deleted functions do not require rewrites.
2546 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2547 continue;
2549 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2550 It.getSecond();
2551 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2553 SmallVector<Type *, 16> NewArgumentTypes;
2554 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2556 // Collect replacement argument types and copy over existing attributes.
2557 AttributeList OldFnAttributeList = OldFn->getAttributes();
2558 for (Argument &Arg : OldFn->args()) {
2559 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2560 ARIs[Arg.getArgNo()]) {
2561 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2562 ARI->ReplacementTypes.end());
2563 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2564 AttributeSet());
2565 } else {
2566 NewArgumentTypes.push_back(Arg.getType());
2567 NewArgumentAttributes.push_back(
2568 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2572 uint64_t LargestVectorWidth = 0;
2573 for (auto *I : NewArgumentTypes)
2574 if (auto *VT = dyn_cast<llvm::VectorType>(I))
2575 LargestVectorWidth = std::max(
2576 LargestVectorWidth, VT->getPrimitiveSizeInBits().getKnownMinSize());
2578 FunctionType *OldFnTy = OldFn->getFunctionType();
2579 Type *RetTy = OldFnTy->getReturnType();
2581 // Construct the new function type using the new arguments types.
2582 FunctionType *NewFnTy =
2583 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2585 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2586 << "' from " << *OldFn->getFunctionType() << " to "
2587 << *NewFnTy << "\n");
2589 // Create the new function body and insert it into the module.
2590 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2591 OldFn->getAddressSpace(), "");
2592 Functions.insert(NewFn);
2593 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2594 NewFn->takeName(OldFn);
2595 NewFn->copyAttributesFrom(OldFn);
2597 // Patch the pointer to LLVM function in debug info descriptor.
2598 NewFn->setSubprogram(OldFn->getSubprogram());
2599 OldFn->setSubprogram(nullptr);
2601 // Recompute the parameter attributes list based on the new arguments for
2602 // the function.
2603 LLVMContext &Ctx = OldFn->getContext();
2604 NewFn->setAttributes(AttributeList::get(
2605 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2606 NewArgumentAttributes));
2607 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2609 // Since we have now created the new function, splice the body of the old
2610 // function right into the new function, leaving the old rotting hulk of the
2611 // function empty.
2612 NewFn->getBasicBlockList().splice(NewFn->begin(),
2613 OldFn->getBasicBlockList());
2615 // Fixup block addresses to reference new function.
2616 SmallVector<BlockAddress *, 8u> BlockAddresses;
2617 for (User *U : OldFn->users())
2618 if (auto *BA = dyn_cast<BlockAddress>(U))
2619 BlockAddresses.push_back(BA);
2620 for (auto *BA : BlockAddresses)
2621 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2623 // Set of all "call-like" instructions that invoke the old function mapped
2624 // to their new replacements.
2625 SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
2627 // Callback to create a new "call-like" instruction for a given one.
2628 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2629 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2630 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2632 // Collect the new argument operands for the replacement call site.
2633 SmallVector<Value *, 16> NewArgOperands;
2634 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2635 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2636 unsigned NewFirstArgNum = NewArgOperands.size();
2637 (void)NewFirstArgNum; // only used inside assert.
2638 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2639 ARIs[OldArgNum]) {
2640 if (ARI->ACSRepairCB)
2641 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2642 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2643 NewArgOperands.size() &&
2644                  "ACS repair callback did not provide as many operands as new "
2645 "types were registered!");
2646           // TODO: Expose the attribute set to the ACS repair callback.
2647 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2648 AttributeSet());
2649 } else {
2650 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2651 NewArgOperandAttributes.push_back(
2652 OldCallAttributeList.getParamAttrs(OldArgNum));
2656 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2657 "Mismatch # argument operands vs. # argument operand attributes!");
2658 assert(NewArgOperands.size() == NewFn->arg_size() &&
2659 "Mismatch # argument operands vs. # function arguments!");
2661 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2662 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2664 // Create a new call or invoke instruction to replace the old one.
2665 CallBase *NewCB;
2666 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2667 NewCB =
2668 InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2669 NewArgOperands, OperandBundleDefs, "", OldCB);
2670 } else {
2671 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2672 "", OldCB);
2673 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2674 NewCB = NewCI;
2677 // Copy over various properties and the new attributes.
2678 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2679 NewCB->setCallingConv(OldCB->getCallingConv());
2680 NewCB->takeName(OldCB);
2681 NewCB->setAttributes(AttributeList::get(
2682 Ctx, OldCallAttributeList.getFnAttrs(),
2683 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2685 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
2686 LargestVectorWidth);
2688 CallSitePairs.push_back({OldCB, NewCB});
2689 return true;
2692 // Use the CallSiteReplacementCreator to create replacement call sites.
2693 bool UsedAssumedInformation = false;
2694 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2695 true, nullptr, UsedAssumedInformation);
2696 (void)Success;
2697 assert(Success && "Assumed call site replacement to succeed!");
2699 // Rewire the arguments.
2700 Argument *OldFnArgIt = OldFn->arg_begin();
2701 Argument *NewFnArgIt = NewFn->arg_begin();
2702 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2703 ++OldArgNum, ++OldFnArgIt) {
2704 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2705 ARIs[OldArgNum]) {
2706 if (ARI->CalleeRepairCB)
2707 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2708 if (ARI->ReplacementTypes.empty())
2709 OldFnArgIt->replaceAllUsesWith(
2710 PoisonValue::get(OldFnArgIt->getType()));
2711 NewFnArgIt += ARI->ReplacementTypes.size();
2712 } else {
2713 NewFnArgIt->takeName(&*OldFnArgIt);
2714 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
2715 ++NewFnArgIt;
2719 // Eliminate the instructions *after* we visited all of them.
2720 for (auto &CallSitePair : CallSitePairs) {
2721 CallBase &OldCB = *CallSitePair.first;
2722 CallBase &NewCB = *CallSitePair.second;
2723 assert(OldCB.getType() == NewCB.getType() &&
2724 "Cannot handle call sites with different types!");
2725 ModifiedFns.insert(OldCB.getFunction());
2726 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
2727 OldCB.replaceAllUsesWith(&NewCB);
2728 OldCB.eraseFromParent();
2731 // Replace the function in the call graph (if any).
2732 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
2734 // If the old function was modified and needed to be reanalyzed, the new one
2735 // does now.
2736 if (ModifiedFns.remove(OldFn))
2737 ModifiedFns.insert(NewFn);
2739 Changed = ChangeStatus::CHANGED;
2742 return Changed;
2745 void InformationCache::initializeInformationCache(const Function &CF,
2746 FunctionInfo &FI) {
2747   // As we do not modify the function here we can remove the const
2748   // without breaking implicit assumptions. At the end of the day, we could
2749   // initialize the cache eagerly which would look the same to the users.
2750 Function &F = const_cast<Function &>(CF);
2752 // Walk all instructions to find interesting instructions that might be
2753 // queried by abstract attributes during their initialization or update.
2754 // This has to happen before we create attributes.
2756 DenseMap<const Value *, Optional<short>> AssumeUsesMap;
2758   // Add \p V to the assume uses map which tracks the number of uses outside
2759   // of "visited" assumes. If no outside uses are left, the value is added to
2760   // the AssumeOnlyValues set.
2761 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
2762 SmallVector<const Instruction *> Worklist;
2763 if (auto *I = dyn_cast<Instruction>(&V))
2764 Worklist.push_back(I);
2765 while (!Worklist.empty()) {
2766 const Instruction *I = Worklist.pop_back_val();
2767 Optional<short> &NumUses = AssumeUsesMap[I];
2768 if (!NumUses)
2769 NumUses = I->getNumUses();
2770 NumUses = NumUses.value() - /* this assume */ 1;
2771 if (NumUses.value() != 0)
2772 continue;
2773 AssumeOnlyValues.insert(I);
2774 for (const Value *Op : I->operands())
2775 if (auto *OpI = dyn_cast<Instruction>(Op))
2776 Worklist.push_back(OpI);
2780 for (Instruction &I : instructions(&F)) {
2781 bool IsInterestingOpcode = false;
2783 // To allow easy access to all instructions in a function with a given
2784 // opcode we store them in the InfoCache. As not all opcodes are interesting
2785 // to concrete attributes we only cache the ones that are as identified in
2786 // the following switch.
2787 // Note: There are no concrete attributes now so this is initially empty.
2788 switch (I.getOpcode()) {
2789 default:
2790 assert(!isa<CallBase>(&I) &&
2791 "New call base instruction type needs to be known in the "
2792 "Attributor.");
2793 break;
2794 case Instruction::Call:
2795 // Calls are interesting on their own, additionally:
2796 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
2797 // For `must-tail` calls we remember the caller and callee.
2798 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
2799 fillMapFromAssume(*Assume, KnowledgeMap);
2800 AddToAssumeUsesMap(*Assume->getArgOperand(0));
2801 } else if (cast<CallInst>(I).isMustTailCall()) {
2802 FI.ContainsMustTailCall = true;
2803 if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
2804 getFunctionInfo(*Callee).CalledViaMustTail = true;
2806 [[fallthrough]];
2807 case Instruction::CallBr:
2808 case Instruction::Invoke:
2809 case Instruction::CleanupRet:
2810 case Instruction::CatchSwitch:
2811 case Instruction::AtomicRMW:
2812 case Instruction::AtomicCmpXchg:
2813 case Instruction::Br:
2814 case Instruction::Resume:
2815 case Instruction::Ret:
2816 case Instruction::Load:
2817 // The alignment of a pointer is interesting for loads.
2818 case Instruction::Store:
2819 // The alignment of a pointer is interesting for stores.
2820 case Instruction::Alloca:
2821 case Instruction::AddrSpaceCast:
2822 IsInterestingOpcode = true;
2824 if (IsInterestingOpcode) {
2825 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
2826 if (!Insts)
2827 Insts = new (Allocator) InstructionVectorTy();
2828 Insts->push_back(&I);
2830 if (I.mayReadOrWriteMemory())
2831 FI.RWInsts.push_back(&I);
2834 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
2835 isInlineViable(F).isSuccess())
2836 InlineableFunctions.insert(&F);
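// Editorial example (sketch): consumers retrieve the per-function caches
// populated above through the InformationCache, e.g. to enumerate all cached
// call instructions of a function F:
//
//   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
//   if (auto *Calls = OpcodeInstMap.lookup(Instruction::Call))
//     for (Instruction *I : *Calls)
//       (void)I; // visit the call
//
// Only the "interesting" opcodes from the switch above are cached; a lookup
// for any other opcode simply yields a null vector.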
2839 AAResults *InformationCache::getAAResultsForFunction(const Function &F) {
2840 return AG.getAnalysis<AAManager>(F);
2843 InformationCache::FunctionInfo::~FunctionInfo() {
2844 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
2845 // manually destroy them.
2846 for (auto &It : OpcodeInstMap)
2847 It.getSecond()->~InstructionVectorTy();
2850 void Attributor::recordDependence(const AbstractAttribute &FromAA,
2851 const AbstractAttribute &ToAA,
2852 DepClassTy DepClass) {
2853 if (DepClass == DepClassTy::NONE)
2854 return;
2855 // If we are outside of an update, thus before the actual fixpoint iteration
2856 // started (= when we create AAs), we do not track dependences because we will
2857 // put all AAs into the initial worklist anyway.
2858 if (DependenceStack.empty())
2859 return;
2860 if (FromAA.getState().isAtFixpoint())
2861 return;
2862 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
2865 void Attributor::rememberDependences() {
2866 assert(!DependenceStack.empty() && "No dependences to remember!");
2868 for (DepInfo &DI : *DependenceStack.back()) {
2869 assert((DI.DepClass == DepClassTy::REQUIRED ||
2870 DI.DepClass == DepClassTy::OPTIONAL) &&
2871 "Expected required or optional dependence (1 bit)!");
2872 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
2873 DepAAs.push_back(AbstractAttribute::DepTy(
2874 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
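// Editorial example (sketch): dependences are recorded implicitly whenever
// one abstract attribute queries another during its update, e.g.:
//
//   const auto &NoUnwindAA =
//       A.getAAFor<AANoUnwind>(*this, FnPos, DepClassTy::REQUIRED);
//
// If NoUnwindAA changes later, the querying AA is put back on the worklist;
// if it becomes invalid, REQUIRED dependents are pessimistically fixed while
// OPTIONAL dependents are merely recomputed (see runTillFixpoint above).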
2878 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
2879 if (!VisitedFunctions.insert(&F).second)
2880 return;
2881 if (F.isDeclaration())
2882 return;
2884 // In non-module runs we need to look at the call sites of a function to
2885 // determine if it is part of a must-tail call edge. This will influence what
2886 // attributes we can derive.
2887 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
2888 if (!isModulePass() && !FI.CalledViaMustTail) {
2889 for (const Use &U : F.uses())
2890 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
2891 if (CB->isCallee(&U) && CB->isMustTailCall())
2892 FI.CalledViaMustTail = true;
2895 IRPosition FPos = IRPosition::function(F);
2897 // Check for dead BasicBlocks in every function.
2898 // We need dead instruction detection because we do not want to deal with
2899 // broken IR in which SSA rules do not apply.
2900 getOrCreateAAFor<AAIsDead>(FPos);
2902 // Every function might be "will-return".
2903 getOrCreateAAFor<AAWillReturn>(FPos);
2905 // Every function might contain instructions that cause "undefined behavior".
2906 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
2908 // Every function can be nounwind.
2909 getOrCreateAAFor<AANoUnwind>(FPos);
2911 // Every function might be marked "nosync"
2912 getOrCreateAAFor<AANoSync>(FPos);
2914 // Every function might be "no-free".
2915 getOrCreateAAFor<AANoFree>(FPos);
2917 // Every function might be "no-return".
2918 getOrCreateAAFor<AANoReturn>(FPos);
2920 // Every function might be "no-recurse".
2921 getOrCreateAAFor<AANoRecurse>(FPos);
2923 // Every function might be "readnone/readonly/writeonly/...".
2924 getOrCreateAAFor<AAMemoryBehavior>(FPos);
2926 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
2927 getOrCreateAAFor<AAMemoryLocation>(FPos);
2929 // Every function can track active assumptions.
2930 getOrCreateAAFor<AAAssumptionInfo>(FPos);
2932 // Every function might be applicable for Heap-To-Stack conversion.
2933 if (EnableHeapToStack)
2934 getOrCreateAAFor<AAHeapToStack>(FPos);
2936 // Return attributes are only appropriate if the return type is non void.
2937 Type *ReturnType = F.getReturnType();
2938 if (!ReturnType->isVoidTy()) {
2939 // Argument attribute "returned" --- Create only one per function even
2940 // though it is an argument attribute.
2941 getOrCreateAAFor<AAReturnedValues>(FPos);
2943 IRPosition RetPos = IRPosition::returned(F);
2945 // Every returned value might be dead.
2946 getOrCreateAAFor<AAIsDead>(RetPos);
2948 // Every function might be simplified.
2949 bool UsedAssumedInformation = false;
2950 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
2951 AA::Intraprocedural);
2953 // Every returned value might be marked noundef.
2954 getOrCreateAAFor<AANoUndef>(RetPos);
2956 if (ReturnType->isPointerTy()) {
2958 // Every function with pointer return type might be marked align.
2959 getOrCreateAAFor<AAAlign>(RetPos);
2961 // Every function with pointer return type might be marked nonnull.
2962 getOrCreateAAFor<AANonNull>(RetPos);
2964 // Every function with pointer return type might be marked noalias.
2965 getOrCreateAAFor<AANoAlias>(RetPos);
2967 // Every function with pointer return type might be marked
2968 // dereferenceable.
2969 getOrCreateAAFor<AADereferenceable>(RetPos);
2973 for (Argument &Arg : F.args()) {
2974 IRPosition ArgPos = IRPosition::argument(Arg);
2976 // Every argument might be simplified. We have to go through the Attributor
2977 // interface though as outside AAs can register custom simplification
2978 // callbacks.
2979 bool UsedAssumedInformation = false;
2980 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
2981 AA::Intraprocedural);
2983 // Every argument might be dead.
2984 getOrCreateAAFor<AAIsDead>(ArgPos);
2986 // Every argument might be marked noundef.
2987 getOrCreateAAFor<AANoUndef>(ArgPos);
2989 if (Arg.getType()->isPointerTy()) {
2990 // Every argument with pointer type might be marked nonnull.
2991 getOrCreateAAFor<AANonNull>(ArgPos);
2993 // Every argument with pointer type might be marked noalias.
2994 getOrCreateAAFor<AANoAlias>(ArgPos);
2996 // Every argument with pointer type might be marked dereferenceable.
2997 getOrCreateAAFor<AADereferenceable>(ArgPos);
2999 // Every argument with pointer type might be marked align.
3000 getOrCreateAAFor<AAAlign>(ArgPos);
3002 // Every argument with pointer type might be marked nocapture.
3003 getOrCreateAAFor<AANoCapture>(ArgPos);
3005 // Every argument with pointer type might be marked
3006 // "readnone/readonly/writeonly/..."
3007 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3009 // Every argument with pointer type might be marked nofree.
3010 getOrCreateAAFor<AANoFree>(ArgPos);
3012 // Every argument with pointer type might be privatizable (or promotable)
3013 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3017 auto CallSitePred = [&](Instruction &I) -> bool {
3018 auto &CB = cast<CallBase>(I);
3019 IRPosition CBInstPos = IRPosition::inst(CB);
3020 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3022 // Call sites might be dead if they do not have side effects and no live
3023 // users. The return value might be dead if there are no live users.
3024 getOrCreateAAFor<AAIsDead>(CBInstPos);
3026 Function *Callee = CB.getCalledFunction();
3027 // TODO: Even if the callee is not known now we might be able to simplify
3028 // the call/callee.
3029 if (!Callee)
3030 return true;
3032 // Every call site can track active assumptions.
3033 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3035 // Skip declarations except if annotations on their call sites were
3036 // explicitly requested.
3037 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3038 !Callee->hasMetadata(LLVMContext::MD_callback))
3039 return true;
3041 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3043 IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3044 bool UsedAssumedInformation = false;
3045 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3046 AA::Intraprocedural);
3049 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3051 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3053 // Every call site argument might be dead.
3054 getOrCreateAAFor<AAIsDead>(CBArgPos);
3056 // Call site argument might be simplified. We have to go through the
3057 // Attributor interface though as outside AAs can register custom
3058 // simplification callbacks.
3059 bool UsedAssumedInformation = false;
3060 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3061 AA::Intraprocedural);
3063 // Every call site argument might be marked "noundef".
3064 getOrCreateAAFor<AANoUndef>(CBArgPos);
3066 if (!CB.getArgOperand(I)->getType()->isPointerTy())
3067 continue;
3069 // Call site argument attribute "non-null".
3070 getOrCreateAAFor<AANonNull>(CBArgPos);
3072 // Call site argument attribute "nocapture".
3073 getOrCreateAAFor<AANoCapture>(CBArgPos);
3075 // Call site argument attribute "no-alias".
3076 getOrCreateAAFor<AANoAlias>(CBArgPos);
3078 // Call site argument attribute "dereferenceable".
3079 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3081 // Call site argument attribute "align".
3082 getOrCreateAAFor<AAAlign>(CBArgPos);
3084 // Call site argument attribute
3085 // "readnone/readonly/writeonly/..."
3086 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3088 // Call site argument attribute "nofree".
3089 getOrCreateAAFor<AANoFree>(CBArgPos);
3091 return true;
3094 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3095 bool Success;
3096 bool UsedAssumedInformation = false;
3097 Success = checkForAllInstructionsImpl(
3098 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3099 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3100 (unsigned)Instruction::Call},
3101 UsedAssumedInformation);
3102 (void)Success;
3103 assert(Success && "Expected the check call to be successful!");
3105 auto LoadStorePred = [&](Instruction &I) -> bool {
3106 if (isa<LoadInst>(I)) {
3107 getOrCreateAAFor<AAAlign>(
3108 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
3109 if (SimplifyAllLoads)
3110 getAssumedSimplified(IRPosition::value(I), nullptr,
3111 UsedAssumedInformation, AA::Intraprocedural);
3112 } else {
3113 auto &SI = cast<StoreInst>(I);
3114 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3115 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3116 UsedAssumedInformation, AA::Intraprocedural);
3117 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3119 return true;
3121 Success = checkForAllInstructionsImpl(
3122 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3123 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3124 UsedAssumedInformation);
3125 (void)Success;
3126 assert(Success && "Expected the check call to be successful!");
3129 /// Helpers to ease debugging through output streams and print calls.
3131 ///{
3132 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3133 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3136 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3137 switch (AP) {
3138 case IRPosition::IRP_INVALID:
3139 return OS << "inv";
3140 case IRPosition::IRP_FLOAT:
3141 return OS << "flt";
3142 case IRPosition::IRP_RETURNED:
3143 return OS << "fn_ret";
3144 case IRPosition::IRP_CALL_SITE_RETURNED:
3145 return OS << "cs_ret";
3146 case IRPosition::IRP_FUNCTION:
3147 return OS << "fn";
3148 case IRPosition::IRP_CALL_SITE:
3149 return OS << "cs";
3150 case IRPosition::IRP_ARGUMENT:
3151 return OS << "arg";
3152 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3153 return OS << "cs_arg";
3155 llvm_unreachable("Unknown attribute position!");
3158 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3159 const Value &AV = Pos.getAssociatedValue();
3160 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3161 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3163 if (Pos.hasCallBaseContext())
3164 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3165 return OS << "}";
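// Editorial example (sketch): for a function position anchored at @foo the
// printer above yields a string of the shape
//
//   {fn:foo [foo@-1]}
//
// i.e. position kind, associated value name, anchor value name, and the call
// site argument number (-1 when the position is not a call site argument).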
3168 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3169 OS << "range-state(" << S.getBitWidth() << ")<";
3170 S.getKnown().print(OS);
3171 OS << " / ";
3172 S.getAssumed().print(OS);
3173 OS << ">";
3175 return OS << static_cast<const AbstractState &>(S);
raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}
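
// That is, an invalid state prints as "top", a state that reached its
// fixpoint prints as "fix", and a valid, still changing state prints nothing.
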
raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialConstantIntValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet())
      OS << It << ", ";
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}
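
// Example: an assumed set {1, 2} that may also contain undef prints as
// "set-state(< {1, 2, undef } >)"; an invalidated state prints
// "set-state(< {full-set} >)".
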
raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const PotentialLLVMValuesState &S) {
  OS << "set-state(< {";
  if (!S.isValidState())
    OS << "full-set";
  else {
    for (const auto &It : S.getAssumedSet()) {
      if (auto *F = dyn_cast<Function>(It.first.getValue()))
        OS << "@" << F->getName() << "[" << int(It.second) << "], ";
      else
        OS << *It.first.getValue() << "[" << int(It.second) << "], ";
    }
    if (S.undefIsContained())
      OS << "undef ";
  }
  OS << "} >)";

  return OS;
}

void AbstractAttribute::print(raw_ostream &OS) const {
  OS << "[";
  OS << getName();
  OS << "] for CtxI ";

  if (auto *I = getCtxI()) {
    OS << "'";
    I->print(OS);
    OS << "'";
  } else
    OS << "<<null inst>>";

  OS << " at position " << getIRPosition() << " with state " << getAsStr()
     << '\n';
}
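
// A printed abstract attribute hence reads as:
//   [<AAName>] for CtxI '<context instruction>' at position {...} with state ...
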
void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
  print(OS);

  for (const auto &DepAA : Deps) {
    auto *AA = DepAA.getPointer();
    OS << "  updates ";
    AA->print(OS);
  }

  OS << '\n';
}

raw_ostream &llvm::operator<<(raw_ostream &OS,
                              const AAPointerInfo::Access &Acc) {
  OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
  if (Acc.getLocalInst() != Acc.getRemoteInst())
    OS << " via " << *Acc.getLocalInst();
  if (Acc.getContent()) {
    if (*Acc.getContent())
      OS << " [" << **Acc.getContent() << "]";
    else
      OS << " [ <unknown> ]";
  }
  return OS;
}
///}

/// ----------------------------------------------------------------------------
/// Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------

static bool runAttributorOnFunctions(InformationCache &InfoCache,
                                     SetVector<Function *> &Functions,
                                     AnalysisGetter &AG,
                                     CallGraphUpdater &CGUpdater,
                                     bool DeleteFns, bool IsModulePass) {
  if (Functions.empty())
    return false;

  LLVM_DEBUG({
    dbgs() << "[Attributor] Run on module with " << Functions.size()
           << " functions:\n";
    for (Function *Fn : Functions)
      dbgs() << "  - " << Fn->getName() << "\n";
  });

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  AttributorConfig AC(CGUpdater);
  AC.IsModulePass = IsModulePass;
  AC.DeleteFns = DeleteFns;
  Attributor A(Functions, InfoCache, AC);

  // Create shallow wrappers for all functions that are not IPO amendable.
  if (AllowShallowWrappers)
    for (Function *F : Functions)
      if (!A.isFunctionIPOAmendable(*F))
        Attributor::createShallowWrapper(*F);
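
  // Note: createShallowWrapper lets the wrapper take over F's name and
  // linkage while F itself becomes anonymous with internal linkage and is
  // merely tail-called by the wrapper. The wrapper thus has an exact
  // definition, and every remaining use of F is visible in the module.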

  // Internalize non-exact functions.
  // TODO: for now we eagerly internalize functions without calculating the
  //       cost; we need a cost interface to determine whether internalizing
  //       a function is "beneficial".
  if (AllowDeepWrapper) {
    unsigned FunSize = Functions.size();
    for (unsigned u = 0; u < FunSize; u++) {
      Function *F = Functions[u];
      if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
          !GlobalValue::isInterposableLinkage(F->getLinkage())) {
        Function *NewF = Attributor::internalizeFunction(*F);
        assert(NewF && "Could not internalize function.");
        Functions.insert(NewF);

        // Update call graph.
        CGUpdater.replaceFunctionWith(*F, *NewF);
        for (const Use &U : NewF->uses())
          if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
            auto *CallerF = CB->getCaller();
            CGUpdater.reanalyzeFunction(*CallerF);
          }
      }
    }
  }
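
  // Attributor::internalizeFunction creates a copy of F with internal linkage
  // (and therefore an exact definition) and redirects F's in-module uses to
  // that copy, so the copy can be analyzed and transformed freely.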

  for (Function *F : Functions) {
    if (F->hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // We look at internal functions only on-demand, but if any use is not a
    // direct call or is outside the current set of analyzed functions, we
    // have to do it eagerly.
    if (F->hasLocalLinkage()) {
      if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
            const auto *CB = dyn_cast<CallBase>(U.getUser());
            return CB && CB->isCallee(&U) &&
                   Functions.count(const_cast<Function *>(CB->getCaller()));
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(*F);
  }

  ChangeStatus Changed = A.run();

  LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
                    << " functions, result: " << Changed << ".\n");
  return Changed == ChangeStatus::CHANGED;
}

void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }

void AADepGraph::dumpGraph() {
  static std::atomic<int> CallTimes;
  std::string Prefix;

  if (!DepGraphDotFileNamePrefix.empty())
    Prefix = DepGraphDotFileNamePrefix;
  else
    Prefix = "dep_graph";
  std::string Filename =
      Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";

  outs() << "Dependency graph dump to " << Filename << ".\n";

  std::error_code EC;

  raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
  if (!EC)
    llvm::WriteGraph(File, this);

  CallTimes++;
}
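
// Each invocation writes "<prefix>_<N>.dot" with an incrementing counter, so
// repeated dumps within one process do not overwrite each other.
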
void AADepGraph::print() {
  for (auto DepAA : SyntheticRoot.Deps)
    cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (Function &F : M)
    Functions.insert(&F);

  CallGraphUpdater CGUpdater;
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ true, /* IsModulePass */ true)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
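
// Sketch of a typical new-pass-manager invocation (assuming the usual pass
// name registration, which lives outside this file):
//   opt -passes=attributor -S in.ll -o out.ll
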
PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
                                           CGSCCAnalysisManager &AM,
                                           LazyCallGraph &CG,
                                           CGSCCUpdateResult &UR) {
  FunctionAnalysisManager &FAM =
      AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
  AnalysisGetter AG(FAM);

  SetVector<Function *> Functions;
  for (LazyCallGraph::Node &N : C)
    Functions.insert(&N.getFunction());

  if (Functions.empty())
    return PreservedAnalyses::all();

  Module &M = *Functions.back()->getParent();
  CallGraphUpdater CGUpdater;
  CGUpdater.initialize(CG, C, AM, UR);
  BumpPtrAllocator Allocator;
  InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
  if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                               /* DeleteFns */ false,
                               /* IsModulePass */ false)) {
    // FIXME: Think about passes we will preserve and add them here.
    PreservedAnalyses PA;
    PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
    return PA;
  }
  return PreservedAnalyses::all();
}

namespace llvm {
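
// The GraphTraits and DOTGraphTraits specializations below let the generic
// graph utilities used above (llvm::ViewGraph and llvm::WriteGraph) traverse
// and render the abstract attribute dependency graph.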

template <> struct GraphTraits<AADepGraphNode *> {
  using NodeRef = AADepGraphNode *;
  using DepTy = PointerIntPair<AADepGraphNode *, 1>;
  using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;

  static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
  static NodeRef DepGetVal(DepTy &DT) { return DT.getPointer(); }

  using ChildIteratorType =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
  using ChildEdgeIteratorType = TinyPtrVector<DepTy>::iterator;

  static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }

  static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
};

template <>
struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
  static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }

  using nodes_iterator =
      mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;

  static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }

  static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
};

template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
  DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}

  static std::string getNodeLabel(const AADepGraphNode *Node,
                                  const AADepGraph *DG) {
    std::string AAString;
    raw_string_ostream O(AAString);
    Node->print(O);
    return AAString;
  }
};

} // end namespace llvm

namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;

    AnalysisGetter AG;
    SetVector<Function *> Functions;
    for (Function &F : M)
      Functions.insert(&F);

    CallGraphUpdater CGUpdater;
    BumpPtrAllocator Allocator;
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns */ true,
                                    /* IsModulePass */ true);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }
};

struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
  static char ID;

  AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
    initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnSCC(CallGraphSCC &SCC) override {
    if (skipSCC(SCC))
      return false;

    SetVector<Function *> Functions;
    for (CallGraphNode *CGN : SCC)
      if (Function *Fn = CGN->getFunction())
        if (!Fn->isDeclaration())
          Functions.insert(Fn);

    if (Functions.empty())
      return false;

    AnalysisGetter AG;
    CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
    CallGraphUpdater CGUpdater;
    CGUpdater.initialize(CG, SCC);
    Module &M = *Functions.back()->getParent();
    BumpPtrAllocator Allocator;
    InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
    return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
                                    /* DeleteFns */ false,
                                    /* IsModulePass */ false);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    CallGraphSCCPass::getAnalysisUsage(AU);
  }
};

} // end anonymous namespace

Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
Pass *llvm::createAttributorCGSCCLegacyPass() {
  return new AttributorCGSCCLegacyPass();
}

char AttributorLegacyPass::ID = 0;
char AttributorCGSCCLegacyPass::ID = 0;
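
// Register the legacy passes; under the legacy pass manager they are exposed
// via the command line names "attributor" and "attributor-cgscc" given below.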
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
                      "Deduce and propagate attributes (CGSCC pass)", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
                    "Deduce and propagate attributes (CGSCC pass)", false,
                    false)