//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
16 #include "llvm/Transforms/IPO/Attributor.h"
18 #include "llvm/ADT/ArrayRef.h"
19 #include "llvm/ADT/PointerIntPair.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/SmallPtrSet.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/AliasAnalysis.h"
24 #include "llvm/Analysis/CallGraph.h"
25 #include "llvm/Analysis/InlineCost.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MustExecute.h"
28 #include "llvm/IR/AttributeMask.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/Constant.h"
31 #include "llvm/IR/ConstantFold.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/GlobalValue.h"
35 #include "llvm/IR/GlobalVariable.h"
36 #include "llvm/IR/Instruction.h"
37 #include "llvm/IR/Instructions.h"
38 #include "llvm/IR/IntrinsicInst.h"
39 #include "llvm/IR/LLVMContext.h"
40 #include "llvm/IR/ValueHandle.h"
41 #include "llvm/Support/Casting.h"
42 #include "llvm/Support/CommandLine.h"
43 #include "llvm/Support/Debug.h"
44 #include "llvm/Support/DebugCounter.h"
45 #include "llvm/Support/FileSystem.h"
46 #include "llvm/Support/GraphWriter.h"
47 #include "llvm/Support/ModRef.h"
48 #include "llvm/Support/raw_ostream.h"
49 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
50 #include "llvm/Transforms/Utils/Cloning.h"
51 #include "llvm/Transforms/Utils/Local.h"
52 #include <cstdint>
53 #include <memory>
55 #ifdef EXPENSIVE_CHECKS
56 #include "llvm/IR/Verifier.h"
57 #endif
59 #include <cassert>
60 #include <optional>
61 #include <string>
using namespace llvm;

#define DEBUG_TYPE "attributor"
#define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
              "Determine what attributes are manifested in the IR");

STATISTIC(NumFnDeleted, "Number of functions deleted");
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems not to induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    SetFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
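
// For instance, one way to experiment with a smaller iteration budget is
// (illustrative invocation; any pipeline that schedules the Attributor works):
//   opt -passes=attributor -attributor-max-iterations=16 input.ll -S -o out.ll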

static cl::opt<unsigned>
    MaxSpecializationPerCB("attributor-max-specializations-per-call-base",
                           cl::Hidden,
                           cl::desc("Maximal number of callees specialized for "
                                    "a call base"),
                           cl::init(UINT32_MAX));

static cl::opt<unsigned, true> MaxInitializationChainLengthX(
    "attributor-max-initialization-chain-length", cl::Hidden,
    cl::desc(
        "Maximal number of chained initializations (to avoid stack overflows)"),
    cl::location(MaxInitializationChainLength), cl::init(1024));
unsigned llvm::MaxInitializationChainLength;

static cl::opt<bool> AnnotateDeclarationCallSites(
    "attributor-annotate-decl-cs", cl::Hidden,
    cl::desc("Annotate call sites of function declarations."), cl::init(false));

static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
                                       cl::init(true), cl::Hidden);

static cl::opt<bool>
    AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
                         cl::desc("Allow the Attributor to create shallow "
                                  "wrappers for non-exact definitions."),
                         cl::init(false));

static cl::opt<bool>
    AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
                     cl::desc("Allow the Attributor to use IP information "
                              "derived from non-exact functions via cloning"),
                     cl::init(false));

// These options can only be used for debug builds.
#ifndef NDEBUG
static cl::list<std::string>
    SeedAllowList("attributor-seed-allow-list", cl::Hidden,
                  cl::desc("Comma separated list of attribute names that are "
                           "allowed to be seeded."),
                  cl::CommaSeparated);

static cl::list<std::string> FunctionSeedAllowList(
    "attributor-function-seed-allow-list", cl::Hidden,
    cl::desc("Comma separated list of function names that are "
             "allowed to be seeded."),
    cl::CommaSeparated);
#endif

static cl::opt<bool>
    DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
                 cl::desc("Dump the dependency graph to dot files."),
                 cl::init(false));

static cl::opt<std::string> DepGraphDotFileNamePrefix(
    "attributor-depgraph-dot-filename-prefix", cl::Hidden,
    cl::desc("The prefix used for the dependency graph dot file names."));

static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
                                  cl::desc("View the dependency graph."),
                                  cl::init(false));

static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
                                       cl::desc("Print attribute dependencies"),
                                       cl::init(false));

static cl::opt<bool> EnableCallSiteSpecific(
    "attributor-enable-call-site-specific-deduction", cl::Hidden,
    cl::desc("Allow the Attributor to do call site specific analysis"),
    cl::init(false));

static cl::opt<bool>
    PrintCallGraph("attributor-print-call-graph", cl::Hidden,
                   cl::desc("Print Attributor's internal call graph"),
                   cl::init(false));

static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
                                      cl::Hidden,
                                      cl::desc("Try to simplify all loads."),
                                      cl::init(true));

static cl::opt<bool> CloseWorldAssumption(
    "attributor-assume-closed-world", cl::Hidden,
    cl::desc("Should a closed world be assumed, or not. Default if not set."));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::CHANGED ? L : R;
}
ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
  L = L | R;
  return L;
}
ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
  return L == ChangeStatus::UNCHANGED ? L : R;
}
ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
  L = L & R;
  return L;
}
///}
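
// Illustrative usage of the operators above: CHANGED is absorbing for `|` and
// UNCHANGED is absorbing for `&`, so results are typically accumulated as
//   ChangeStatus Changed = ChangeStatus::UNCHANGED;
//   Changed |= A.manifestAttrs(IRP, Attrs); // once CHANGED, stays CHANGED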

bool AA::isGPU(const Module &M) {
  Triple T(M.getTargetTriple());
  return T.isAMDGPU() || T.isNVPTX();
}

bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
                      const AbstractAttribute &QueryingAA) {
  // We are looking for volatile instructions or non-relaxed atomics.
  if (const auto *CB = dyn_cast<CallBase>(&I)) {
    if (CB->hasFnAttr(Attribute::NoSync))
      return true;

    // Non-convergent and readnone imply nosync.
    if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
      return true;

    if (AANoSync::isNoSyncIntrinsic(&I))
      return true;

    bool IsKnownNoSync;
    return AA::hasAssumedIRAttr<Attribute::NoSync>(
        A, &QueryingAA, IRPosition::callsite_function(*CB),
        DepClassTy::OPTIONAL, IsKnownNoSync);
  }

  if (!I.mayReadOrWriteMemory())
    return true;

  return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
}
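
// Note (illustrative): under the rules above, a relaxed (monotonic) atomic
// access is treated as nosync, while volatile accesses and acquire/release or
// stronger atomics are conservatively considered synchronizing.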

bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
                             const Value &V, bool ForAnalysisOnly) {
  // TODO: See the AAInstanceInfo class comment.
  if (!ForAnalysisOnly)
    return false;
  auto *InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
      QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
  return InstanceInfoAA && InstanceInfoAA->isAssumedUniqueForAnalysis();
}

Constant *
AA::getInitialValueForObj(Attributor &A, const AbstractAttribute &QueryingAA,
                          Value &Obj, Type &Ty, const TargetLibraryInfo *TLI,
                          const DataLayout &DL, AA::RangeTy *RangePtr) {
  if (isa<AllocaInst>(Obj))
    return UndefValue::get(&Ty);
  if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
    return Init;
  auto *GV = dyn_cast<GlobalVariable>(&Obj);
  if (!GV)
    return nullptr;

  bool UsedAssumedInformation = false;
  Constant *Initializer = nullptr;
  if (A.hasGlobalVariableSimplificationCallback(*GV)) {
    auto AssumedGV = A.getAssumedInitializerFromCallBack(
        *GV, &QueryingAA, UsedAssumedInformation);
    Initializer = *AssumedGV;
    if (!Initializer)
      return nullptr;
  } else {
    if (!GV->hasLocalLinkage() &&
        (GV->isInterposable() || !(GV->isConstant() && GV->hasInitializer())))
      return nullptr;
    if (!GV->hasInitializer())
      return UndefValue::get(&Ty);
  }
  if (!Initializer)
    Initializer = GV->getInitializer();

  if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
    APInt Offset = APInt(64, RangePtr->Offset);
    return ConstantFoldLoadFromConst(Initializer, &Ty, Offset, DL);
  }

  return ConstantFoldLoadFromUniformValue(Initializer, &Ty, DL);
}

bool AA::isValidInScope(const Value &V, const Function *Scope) {
  if (isa<Constant>(V))
    return true;
  if (auto *I = dyn_cast<Instruction>(&V))
    return I->getFunction() == Scope;
  if (auto *A = dyn_cast<Argument>(&V))
    return A->getParent() == Scope;
  return false;
}

bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
                           InformationCache &InfoCache) {
  if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
    return true;
  const Function *Scope = nullptr;
  const Instruction *CtxI = VAC.getCtxI();
  if (CtxI)
    Scope = CtxI->getFunction();
  if (auto *A = dyn_cast<Argument>(VAC.getValue()))
    return A->getParent() == Scope;
  if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
    if (I->getFunction() == Scope) {
      if (const DominatorTree *DT =
              InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
                  *Scope))
        return DT->dominates(I, CtxI);
      // Local dominance check mostly for the old PM passes.
      if (CtxI && I->getParent() == CtxI->getParent())
        return llvm::any_of(
            make_range(I->getIterator(), I->getParent()->end()),
            [&](const Instruction &AfterI) { return &AfterI == CtxI; });
    }
  }
  return false;
}

Value *AA::getWithType(Value &V, Type &Ty) {
  if (V.getType() == &Ty)
    return &V;
  if (isa<PoisonValue>(V))
    return PoisonValue::get(&Ty);
  if (isa<UndefValue>(V))
    return UndefValue::get(&Ty);
  if (auto *C = dyn_cast<Constant>(&V)) {
    if (C->isNullValue())
      return Constant::getNullValue(&Ty);
    if (C->getType()->isPointerTy() && Ty.isPointerTy())
      return ConstantExpr::getPointerCast(C, &Ty);
    if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
      if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
        return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
      if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
        return ConstantFoldCastInstruction(Instruction::FPTrunc, C, &Ty);
    }
  }
  return nullptr;
}
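
// Illustrative conversions performed above: a null constant such as `i64 0`
// can be rewritten as `i32 0`, a pointer constant can be pointer-casted to
// the requested pointer type, and a wider integer constant such as `i64 42`
// can be truncated to `i32 42`.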

std::optional<Value *>
AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
                                         const std::optional<Value *> &B,
                                         Type *Ty) {
  if (A == B)
    return A;
  if (!B)
    return A;
  if (*B == nullptr)
    return nullptr;
  if (!A)
    return Ty ? getWithType(**B, *Ty) : nullptr;
  if (*A == nullptr)
    return nullptr;
  if (!Ty)
    Ty = (*A)->getType();
  if (isa_and_nonnull<UndefValue>(*A))
    return getWithType(**B, *Ty);
  if (isa<UndefValue>(*B))
    return A;
  if (*A && *B && *A == getWithType(**B, *Ty))
    return A;
  return nullptr;
}
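
// Reading of the lattice encoding above (as implied by the code):
// std::nullopt means "no value known yet", a null Value* means "too many
// values" (bottom), and undef is compatible with anything. For example,
// combining `undef` with `i32 7` yields `i32 7`, while combining two distinct
// concrete values collapses to nullptr.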

template <bool IsLoad, typename Ty>
static bool getPotentialCopiesOfMemoryValue(
    Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
    SmallSetVector<Instruction *, 4> *PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
                    << " (only exact: " << OnlyExact << ")\n";);

  Value &Ptr = *I.getPointerOperand();
  // Containers to remember the pointer infos and new copies while we are not
  // sure that we can find all of them. If we abort we want to avoid spurious
  // dependences and potential copies in the provided container.
  SmallVector<const AAPointerInfo *> PIs;
  SmallSetVector<Value *, 8> NewCopies;
  SmallSetVector<Instruction *, 8> NewCopyOrigins;

  const auto *TLI =
      A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());

  auto Pred = [&](Value &Obj) {
    LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
    if (isa<UndefValue>(&Obj))
      return true;
    if (isa<ConstantPointerNull>(&Obj)) {
      // A null pointer access can be undefined but any offset from null may
      // be OK. We do not try to optimize the latter.
      if (!NullPointerIsDefined(I.getFunction(),
                                Ptr.getType()->getPointerAddressSpace()) &&
          A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
                                 AA::Interprocedural) == &Obj)
        return true;
      LLVM_DEBUG(
          dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
      return false;
    }
    // TODO: Use assumed noalias return.
    if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
        !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
      LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
                        << "\n";);
      return false;
    }
    if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
      if (!GV->hasLocalLinkage() &&
          !(GV->isConstant() && GV->hasInitializer())) {
        LLVM_DEBUG(dbgs() << "Underlying object is global with external "
                             "linkage, not supported yet: "
                          << Obj << "\n";);
        return false;
      }

    bool NullOnly = true;
    bool NullRequired = false;
    auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
                                        bool IsExact) {
      if (!V || *V == nullptr)
        NullOnly = false;
      else if (isa<UndefValue>(*V))
        /* No op */;
      else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
        NullRequired = !IsExact;
      else
        NullOnly = false;
    };

    auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
                                      Value &V) {
      Value *AdjV = AA::getWithType(V, *I.getType());
      if (!AdjV) {
        LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
                             "cannot be converted to read type: "
                          << *Acc.getRemoteInst() << " : " << *I.getType()
                          << "\n";);
      }
      return AdjV;
    };

    auto SkipCB = [&](const AAPointerInfo::Access &Acc) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad) {
        if (Acc.isWrittenValueYetUndetermined())
          return true;
        if (PotentialValueOrigins && !isa<AssumeInst>(Acc.getRemoteInst()))
          return false;
        if (!Acc.isWrittenValueUnknown())
          if (Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
        if (auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst()))
          if (Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand()))
            if (NewCopies.count(V)) {
              NewCopyOrigins.insert(Acc.getRemoteInst());
              return true;
            }
      }
      return false;
    };

    auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
      if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
        return true;
      if (IsLoad && Acc.isWrittenValueYetUndetermined())
        return true;
      CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
      if (OnlyExact && !IsExact && !NullOnly &&
          !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
        LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
                          << ", abort!\n");
        return false;
      }
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
                             "one, however found non-null one: "
                          << *Acc.getRemoteInst() << ", abort!\n");
        return false;
      }
      if (IsLoad) {
        assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
        if (!Acc.isWrittenValueUnknown()) {
          Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
          if (!V)
            return false;
          NewCopies.insert(V);
          if (PotentialValueOrigins)
            NewCopyOrigins.insert(Acc.getRemoteInst());
          return true;
        }
        auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
        if (!SI) {
          LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
        if (!V)
          return false;
        NewCopies.insert(V);
        if (PotentialValueOrigins)
          NewCopyOrigins.insert(SI);
      } else {
        assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
        auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
        if (!LI && OnlyExact) {
          LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
                               "instruction not supported yet: "
                            << *Acc.getRemoteInst() << "\n";);
          return false;
        }
        NewCopies.insert(Acc.getRemoteInst());
      }
      return true;
    };

    // If the value has been written to we don't need the initial value of the
    // object.
    bool HasBeenWrittenTo = false;

    AA::RangeTy Range;
    auto *PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
                                         DepClassTy::NONE);
    if (!PI || !PI->forallInterferingAccesses(
                   A, QueryingAA, I,
                   /* FindInterferingWrites */ IsLoad,
                   /* FindInterferingReads */ !IsLoad, CheckAccess,
                   HasBeenWrittenTo, Range, SkipCB)) {
      LLVM_DEBUG(
          dbgs()
          << "Failed to verify all interfering accesses for underlying object: "
          << Obj << "\n");
      return false;
    }

    if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
      const DataLayout &DL = A.getDataLayout();
      Value *InitialValue = AA::getInitialValueForObj(
          A, QueryingAA, Obj, *I.getType(), TLI, DL, &Range);
      if (!InitialValue) {
        LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
                             "underlying object, abort!\n");
        return false;
      }
      CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
      if (NullRequired && !NullOnly) {
        LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
                             "null or undef, abort!\n");
        return false;
      }

      NewCopies.insert(InitialValue);
      if (PotentialValueOrigins)
        NewCopyOrigins.insert(nullptr);
    }

    PIs.push_back(PI);

    return true;
  };

  const auto *AAUO = A.getAAFor<AAUnderlyingObjects>(
      QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
  if (!AAUO || !AAUO->forallUnderlyingObjects(Pred)) {
    LLVM_DEBUG(
        dbgs() << "Underlying objects stored into could not be determined\n";);
    return false;
  }

  // Only if we were successful collecting all potential copies do we record
  // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
  // given PotentialCopies container.
  for (const auto *PI : PIs) {
    if (!PI->getState().isAtFixpoint())
      UsedAssumedInformation = true;
    A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
  }
  PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
  if (PotentialValueOrigins)
    PotentialValueOrigins->insert(NewCopyOrigins.begin(), NewCopyOrigins.end());

  return true;
}
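
// Illustrative IR example for the load case: given
//   %a = alloca i32
//   store i32 7, ptr %a
//   %v = load i32, ptr %a
// the potential copy of `%v` is the stored value `i32 7`, with the store as
// its origin, assuming no other interfering access is found.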

bool AA::getPotentiallyLoadedValues(
    Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
    SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
      A, LI, PotentialValues, &PotentialValueOrigins, QueryingAA,
      UsedAssumedInformation, OnlyExact);
}

bool AA::getPotentialCopiesOfStoredValue(
    Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
    const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
    bool OnlyExact) {
  return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
      A, SI, PotentialCopies, nullptr, QueryingAA, UsedAssumedInformation,
      OnlyExact);
}

static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
                                        const AbstractAttribute &QueryingAA,
                                        bool RequireReadNone, bool &IsKnown) {
  if (RequireReadNone) {
    if (AA::hasAssumedIRAttr<Attribute::ReadNone>(
            A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
            /* IgnoreSubsumingPositions */ true))
      return true;
  } else if (AA::hasAssumedIRAttr<Attribute::ReadOnly>(
                 A, &QueryingAA, IRP, DepClassTy::OPTIONAL, IsKnown,
                 /* IgnoreSubsumingPositions */ true))
    return true;

  IRPosition::Kind Kind = IRP.getPositionKind();
  if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
    const auto *MemLocAA =
        A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
    if (MemLocAA && MemLocAA->isAssumedReadNone()) {
      IsKnown = MemLocAA->isKnownReadNone();
      if (!IsKnown)
        A.recordDependence(*MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
      return true;
    }
  }

  const auto *MemBehaviorAA =
      A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
  if (MemBehaviorAA &&
      (MemBehaviorAA->isAssumedReadNone() ||
       (!RequireReadNone && MemBehaviorAA->isAssumedReadOnly()))) {
    IsKnown = RequireReadNone ? MemBehaviorAA->isKnownReadNone()
                              : MemBehaviorAA->isKnownReadOnly();
    if (!IsKnown)
      A.recordDependence(*MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
    return true;
  }

  return false;
}

bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ false, IsKnown);
}

bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
                           const AbstractAttribute &QueryingAA, bool &IsKnown) {
  return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
                                     /* RequireReadNone */ true, IsKnown);
}

static bool
isPotentiallyReachable(Attributor &A, const Instruction &FromI,
                       const Instruction *ToI, const Function &ToFn,
                       const AbstractAttribute &QueryingAA,
                       const AA::InstExclusionSetTy *ExclusionSet,
                       std::function<bool(const Function &F)> GoBackwardsCB) {
  DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
    dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
           << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
           << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
           << "]\n";
    if (ExclusionSet)
      for (auto *ES : *ExclusionSet)
        dbgs() << *ES << "\n";
  });

  // We know kernels (generally) cannot be called from within the module. Thus,
  // for reachability we would need to step back from a kernel which would allow
  // us to reach anything anyway. Even if a kernel is invoked from another
  // kernel, values like allocas and shared memory are not accessible. We
  // implicitly check for this situation to avoid costly lookups.
  if (GoBackwardsCB && &ToFn != FromI.getFunction() &&
      !GoBackwardsCB(*FromI.getFunction()) && ToFn.hasFnAttribute("kernel") &&
      FromI.getFunction()->hasFnAttribute("kernel")) {
    LLVM_DEBUG(dbgs() << "[AA] assume kernel cannot be reached from within the "
                         "module; success\n";);
    return false;
  }

  // If we can go arbitrarily backwards we will eventually reach an entry point
  // that can reach ToI. Only if a set of blocks through which we cannot go is
  // provided, or once we track internal functions not accessible from the
  // outside, it makes sense to perform backwards analysis in the absence of a
  // GoBackwardsCB.
  if (!GoBackwardsCB && !ExclusionSet) {
    LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                      << " is not checked backwards and does not have an "
                         "exclusion set, abort\n");
    return true;
  }

  SmallPtrSet<const Instruction *, 8> Visited;
  SmallVector<const Instruction *> Worklist;
  Worklist.push_back(&FromI);

  while (!Worklist.empty()) {
    const Instruction *CurFromI = Worklist.pop_back_val();
    if (!Visited.insert(CurFromI).second)
      continue;

    const Function *FromFn = CurFromI->getFunction();
    if (FromFn == &ToFn) {
      if (!ToI)
        return true;
      LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
                        << " intraprocedurally\n");
      const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << *ToI << " [Intra]\n");
      if (Result)
        return true;
    }

    bool Result = true;
    if (!ToFn.isDeclaration() && ToI) {
      const auto *ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
          QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
      const Instruction &EntryI = ToFn.getEntryBlock().front();
      Result = !ToReachabilityAA || ToReachabilityAA->isAssumedReachable(
                                        A, EntryI, *ToI, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << *ToI << " [ToFn]\n");
    }

    if (Result) {
      // The entry of ToFn can reach the instruction ToI; check if the current
      // instruction is already known to reach ToFn.
      const auto *FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
          QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
      Result = !FnReachabilityAA || FnReachabilityAA->instructionCanReach(
                                        A, *CurFromI, ToFn, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
                        << " " << (Result ? "can potentially " : "cannot ")
                        << "reach @" << ToFn.getName() << " [FromFn]\n");
      if (Result)
        return true;
    }

    // TODO: Check assumed nounwind.
    const auto *ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
        QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
    auto ReturnInstCB = [&](Instruction &Ret) {
      bool Result = !ReachabilityAA || ReachabilityAA->isAssumedReachable(
                                           A, *CurFromI, Ret, ExclusionSet);
      LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
                        << (Result ? "can potentially " : "cannot ") << "reach "
                        << Ret << " [Intra]\n");
      return !Result;
    };

    // Check if we can reach returns.
    bool UsedAssumedInformation = false;
    if (A.checkForAllInstructions(ReturnInstCB, FromFn, &QueryingAA,
                                  {Instruction::Ret}, UsedAssumedInformation)) {
      LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
      continue;
    }

    if (!GoBackwardsCB) {
      LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
                        << " is not checked backwards, abort\n");
      return true;
    }

    // If we do not go backwards from the FromFn we are done here and so far we
    // could not find a way to reach ToFn/ToI.
    if (!GoBackwardsCB(*FromFn))
      continue;

    LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
                      << FromFn->getName() << "\n");

    auto CheckCallSite = [&](AbstractCallSite ACS) {
      CallBase *CB = ACS.getInstruction();
      if (!CB)
        return false;

      if (isa<InvokeInst>(CB))
        return false;

      Instruction *Inst = CB->getNextNonDebugInstruction();
      Worklist.push_back(Inst);
      return true;
    };

    Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
                                     /* RequireAllCallSites */ true,
                                     &QueryingAA, UsedAssumedInformation);
    if (Result) {
      LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
                        << " in @" << FromFn->getName()
                        << " failed, give up\n");
      return true;
    }

    LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
                      << " in @" << FromFn->getName()
                      << " worklist size is: " << Worklist.size() << "\n");
  }

  return false;
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Instruction &ToI,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  const Function *ToFn = ToI.getFunction();
  return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isPotentiallyReachable(
    Attributor &A, const Instruction &FromI, const Function &ToFn,
    const AbstractAttribute &QueryingAA,
    const AA::InstExclusionSetTy *ExclusionSet,
    std::function<bool(const Function &F)> GoBackwardsCB) {
  return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
                                  ExclusionSet, GoBackwardsCB);
}

bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
                                    const AbstractAttribute &QueryingAA) {
  if (isa<UndefValue>(Obj))
    return true;
  if (isa<AllocaInst>(Obj)) {
    InformationCache &InfoCache = A.getInfoCache();
    if (!InfoCache.stackIsAccessibleByOtherThreads()) {
      LLVM_DEBUG(
          dbgs() << "[AA] Object '" << Obj
                 << "' is thread local; stack objects are thread local.\n");
      return true;
    }
    bool IsKnownNoCapture;
    bool IsAssumedNoCapture = AA::hasAssumedIRAttr<Attribute::NoCapture>(
        A, &QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL,
        IsKnownNoCapture);
    LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
                      << (IsAssumedNoCapture ? "" : "not") << " thread local; "
                      << (IsAssumedNoCapture ? "non-" : "")
                      << "captured stack object.\n");
    return IsAssumedNoCapture;
  }
  if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
    if (GV->isConstant()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; constant global\n");
      return true;
    }
    if (GV->isThreadLocal()) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; thread local global\n");
      return true;
    }
  }

  if (A.getInfoCache().targetIsGPU()) {
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Local) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU local memory\n");
      return true;
    }
    if (Obj.getType()->getPointerAddressSpace() ==
        (int)AA::GPUAddressSpace::Constant) {
      LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
                        << "' is thread local; GPU constant memory\n");
      return true;
    }
  }

  LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
  return false;
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
                                        const AbstractAttribute &QueryingAA) {
  if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
    return false;

  SmallSetVector<const Value *, 8> Ptrs;

  auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
    if (!Loc || !Loc->Ptr) {
      LLVM_DEBUG(
          dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
      return false;
    }
    Ptrs.insert(Loc->Ptr);
    return true;
  };

  if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
    if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
      return true;
    if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
      if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
        return true;
  } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
    return true;

  return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
}

bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
                                        ArrayRef<const Value *> Ptrs,
                                        const AbstractAttribute &QueryingAA,
                                        const Instruction *CtxI) {
  for (const Value *Ptr : Ptrs) {
    if (!Ptr) {
      LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
      return true;
    }

    auto Pred = [&](Value &Obj) {
      if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
        return true;
      LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
                        << "'; -> requires barrier\n");
      return false;
    };

    const auto *UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
        QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
    if (!UnderlyingObjsAA || !UnderlyingObjsAA->forallUnderlyingObjects(Pred))
      return true;
  }
  return false;
}

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
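
// For example (illustrative): for integer attributes where larger values are
// stronger, such as alignment, a new `align(4)` is "equal or worse" than an
// existing `align(8)` and would therefore not replace it.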

/// Return true if the information provided by \p Attr was added to the
/// attribute set \p AttrSet. This is only the case if it was not already
/// present in \p AttrSet.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeSet AttrSet, bool ForceReplace,
                             AttrBuilder &AB) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (AttrSet.hasAttribute(Kind))
      return false;
    AB.addAttribute(Kind);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace)
        return false;
    }
    AB.addAttribute(Kind, Attr.getValueAsString());
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (!ForceReplace && Kind == Attribute::Memory) {
      MemoryEffects ME = Attr.getMemoryEffects() & AttrSet.getMemoryEffects();
      if (ME == AttrSet.getMemoryEffects())
        return false;
      AB.addMemoryAttr(ME);
      return true;
    }
    if (AttrSet.hasAttribute(Kind)) {
      if (!ForceReplace && isEqualOrWorse(Attr, AttrSet.getAttribute(Kind)))
        return false;
    }
    AB.addAttribute(Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
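
// Illustrative example for the memory-attribute case above: intersecting a
// new `memory(read)` with an existing `memory(readwrite)` yields
// `memory(read)`, which differs from the existing effects and is thus added.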

Argument *IRPosition::getAssociatedArgument() const {
  if (getPositionKind() == IRP_ARGUMENT)
    return cast<Argument>(&getAnchorValue());

  // Not an Argument and no argument number means this is not a call site
  // argument, thus we cannot find a callback argument to return.
  int ArgNo = getCallSiteArgNo();
  if (ArgNo < 0)
    return nullptr;

  // Use abstract call sites to make the connection between the call site
  // values and the ones in callbacks. If a callback was found that makes use
  // of the underlying call site operand, we want the corresponding callback
  // callee argument and not the direct callee argument.
  std::optional<Argument *> CBCandidateArg;
  SmallVector<const Use *, 4> CallbackUses;
  const auto &CB = cast<CallBase>(getAnchorValue());
  AbstractCallSite::getCallbackUses(CB, CallbackUses);
  for (const Use *U : CallbackUses) {
    AbstractCallSite ACS(U);
    assert(ACS && ACS.isCallbackCall());
    if (!ACS.getCalledFunction())
      continue;

    for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {

      // Test if the underlying call site operand is argument number u of the
      // callback callee.
      if (ACS.getCallArgOperandNo(u) != ArgNo)
        continue;

      assert(ACS.getCalledFunction()->arg_size() > u &&
             "ACS mapped into var-args arguments!");
      if (CBCandidateArg) {
        CBCandidateArg = nullptr;
        break;
      }
      CBCandidateArg = ACS.getCalledFunction()->getArg(u);
    }
  }

  // If we found a unique callback candidate argument, return it.
  if (CBCandidateArg && *CBCandidateArg)
    return *CBCandidateArg;

  // If no callbacks were found, or none used the underlying call site operand
  // exclusively, use the direct callee argument if available.
  auto *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
  if (Callee && Callee->arg_size() > unsigned(ArgNo))
    return Callee->getArg(ArgNo);

  return nullptr;
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

Attributor::Attributor(SetVector<Function *> &Functions,
                       InformationCache &InfoCache,
                       AttributorConfig Configuration)
    : Allocator(InfoCache.Allocator), Functions(Functions),
      InfoCache(InfoCache), Configuration(Configuration) {
  if (!isClosedWorldModule())
    return;
  for (Function *Fn : Functions)
    if (Fn->hasAddressTaken(/*PutOffender=*/nullptr,
                            /*IgnoreCallbackUses=*/false,
                            /*IgnoreAssumeLikeCalls=*/true,
                            /*IgnoreLLVMUsed=*/true,
                            /*IgnoreARCAttachedCall=*/false,
                            /*IgnoreCastedDirectCall=*/true))
      InfoCache.IndirectlyCallableFunctions.push_back(Fn);
}

bool Attributor::getAttrsFromAssumes(const IRPosition &IRP,
                                     Attribute::AttrKind AK,
                                     SmallVectorImpl<Attribute> &Attrs) {
  assert(IRP.getPositionKind() != IRPosition::IRP_INVALID &&
         "Did expect a valid position!");
  MustBeExecutedContextExplorer *Explorer =
      getInfoCache().getMustBeExecutedContextExplorer();
  if (!Explorer)
    return false;

  Value &AssociatedValue = IRP.getAssociatedValue();

  const Assume2KnowledgeMap &A2K =
      getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});

  // Check if we found any potential assume use, if not we don't need to create
  // explorer iterators.
  if (A2K.empty())
    return false;

  LLVMContext &Ctx = AssociatedValue.getContext();
  unsigned AttrsSize = Attrs.size();
  auto EIt = Explorer->begin(IRP.getCtxI()),
       EEnd = Explorer->end(IRP.getCtxI());
  for (const auto &It : A2K)
    if (Explorer->findInContextOf(It.first, EIt, EEnd))
      Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
  return AttrsSize != Attrs.size();
}
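
// Illustrative IR example: knowledge attached via an assume operand bundle,
//   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16)]
// can surface here as an `align(16)` attribute for %p, provided the assume
// lies in the must-be-executed context of the queried position.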

template <typename DescTy>
ChangeStatus
Attributor::updateAttrMap(const IRPosition &IRP, ArrayRef<DescTy> AttrDescs,
                          function_ref<bool(const DescTy &, AttributeSet,
                                            AttributeMask &, AttrBuilder &)>
                              CB) {
  if (AttrDescs.empty())
    return ChangeStatus::UNCHANGED;
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_INVALID:
    return ChangeStatus::UNCHANGED;
  default:
    break;
  }

  AttributeList AL;
  Value *AttrListAnchor = IRP.getAttrListAnchor();
  auto It = AttrsMap.find(AttrListAnchor);
  if (It == AttrsMap.end())
    AL = IRP.getAttrList();
  else
    AL = It->getSecond();

  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AttrIdx = IRP.getAttrIdx();
  AttributeSet AS = AL.getAttributes(AttrIdx);
  AttributeMask AM;
  AttrBuilder AB(Ctx);

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  for (const DescTy &AttrDesc : AttrDescs)
    if (CB(AttrDesc, AS, AM, AB))
      HasChanged = ChangeStatus::CHANGED;

  if (HasChanged == ChangeStatus::UNCHANGED)
    return ChangeStatus::UNCHANGED;

  AL = AL.removeAttributesAtIndex(Ctx, AttrIdx, AM);
  AL = AL.addAttributesAtIndex(Ctx, AttrIdx, AB);
  AttrsMap[AttrListAnchor] = AL;
  return ChangeStatus::CHANGED;
}

bool Attributor::hasAttr(const IRPosition &IRP,
                         ArrayRef<Attribute::AttrKind> AttrKinds,
                         bool IgnoreSubsumingPositions,
                         Attribute::AttrKind ImpliedAttributeKind) {
  bool Implied = false;
  bool HasAttr = false;
  auto HasAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind)) {
      Implied |= Kind != ImpliedAttributeKind;
      HasAttr = true;
    }
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, HasAttrCB);
    if (HasAttr)
      break;
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
    Implied = true;
  }
  if (!HasAttr) {
    Implied = true;
    SmallVector<Attribute> Attrs;
    for (Attribute::AttrKind AK : AttrKinds)
      if (getAttrsFromAssumes(IRP, AK, Attrs)) {
        HasAttr = true;
        break;
      }
  }

  // Check if we should manifest the implied attribute kind at the IRP.
  if (ImpliedAttributeKind != Attribute::None && HasAttr && Implied)
    manifestAttrs(IRP, {Attribute::get(IRP.getAnchorValue().getContext(),
                                       ImpliedAttributeKind)});
  return HasAttr;
}

void Attributor::getAttrs(const IRPosition &IRP,
                          ArrayRef<Attribute::AttrKind> AttrKinds,
                          SmallVectorImpl<Attribute> &Attrs,
                          bool IgnoreSubsumingPositions) {
  auto CollectAttrCB = [&](const Attribute::AttrKind &Kind,
                           AttributeSet AttrSet, AttributeMask &,
                           AttrBuilder &) {
    if (AttrSet.hasAttribute(Kind))
      Attrs.push_back(AttrSet.getAttribute(Kind));
    return false;
  };
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(IRP)) {
    updateAttrMap<Attribute::AttrKind>(EquivIRP, AttrKinds, CollectAttrCB);
    // The first position returned by the SubsumingPositionIterator is
    // always the position itself. If we ignore subsuming positions we
    // are done after the first iteration.
    if (IgnoreSubsumingPositions)
      break;
  }
  for (Attribute::AttrKind AK : AttrKinds)
    getAttrsFromAssumes(IRP, AK, Attrs);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<Attribute::AttrKind> AttrKinds) {
  auto RemoveAttrCB = [&](const Attribute::AttrKind &Kind, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) {
    if (!AttrSet.hasAttribute(Kind))
      return false;
    AM.addAttribute(Kind);
    return true;
  };
  return updateAttrMap<Attribute::AttrKind>(IRP, AttrKinds, RemoveAttrCB);
}

ChangeStatus Attributor::removeAttrs(const IRPosition &IRP,
                                     ArrayRef<StringRef> Attrs) {
  auto RemoveAttrCB = [&](StringRef Attr, AttributeSet AttrSet,
                          AttributeMask &AM, AttrBuilder &) -> bool {
    if (!AttrSet.hasAttribute(Attr))
      return false;
    AM.addAttribute(Attr);
    return true;
  };
  return updateAttrMap<StringRef>(IRP, Attrs, RemoveAttrCB);
}

ChangeStatus Attributor::manifestAttrs(const IRPosition &IRP,
                                       ArrayRef<Attribute> Attrs,
                                       bool ForceReplace) {
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  auto AddAttrCB = [&](const Attribute &Attr, AttributeSet AttrSet,
                       AttributeMask &, AttrBuilder &AB) {
    return addIfNotExistent(Ctx, Attr, AttrSet, ForceReplace, AB);
  };
  return updateAttrMap<Attribute>(IRP, Attrs, AddAttrCB);
}

const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
const IRPosition
    IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  // Helper to determine if operand bundles on a call site are benign or
  // potentially problematic. We handle only llvm.assume for now.
  auto CanIgnoreOperandBundles = [](const CallBase &CB) {
    return (isa<IntrinsicInst>(CB) &&
            cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic::assume);
  };

  const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
      if (auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand()))
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      if (auto *Callee =
              dyn_cast_if_present<Function>(CB->getCalledOperand())) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
        for (const Argument &Arg : Callee->args())
          if (Arg.hasReturnedAttr()) {
            IRPositions.emplace_back(
                IRPosition::callsite_argument(*CB, Arg.getArgNo()));
            IRPositions.emplace_back(
                IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
            IRPositions.emplace_back(IRPosition::argument(Arg));
          }
      }
    }
    IRPositions.emplace_back(IRPosition::callsite_function(*CB));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    assert(CB && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    // in CallBase.
    if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
      auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
      if (Callee) {
        if (Argument *Arg = IRP.getAssociatedArgument())
          IRPositions.emplace_back(IRPosition::argument(*Arg));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
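
// Illustrative example: for the call site argument position of `%x` in
//   call i32 @f(i32 %x)
// with a known callee @f, the positions visited are the call site argument
// itself, the corresponding argument of @f, the function @f, and finally the
// IR value `%x` (assuming no problematic operand bundles are present).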

void IRPosition::verify() {
#ifdef EXPENSIVE_CHECKS
  switch (getPositionKind()) {
  case IRP_INVALID:
    assert((CBContext == nullptr) &&
           "Invalid position must not have CallBaseContext!");
    assert(!Enc.getOpaqueValue() &&
           "Expected a nullptr for an invalid position!");
    return;
  case IRP_FLOAT:
    assert((!isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for argument values!");
    return;
  case IRP_RETURNED:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_RETURNED:
    assert((CBContext == nullptr) &&
           "'call site returned' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site returned' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE:
    assert((CBContext == nullptr) &&
           "'call site function' position must not have CallBaseContext!");
    assert((isa<CallBase>(getAsValuePtr())) &&
           "Expected call base for 'call site function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_FUNCTION:
    assert(isa<Function>(getAsValuePtr()) &&
           "Expected function for a 'function' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_ARGUMENT:
    assert(isa<Argument>(getAsValuePtr()) &&
           "Expected argument for an 'argument' position!");
    assert(getAsValuePtr() == &getAssociatedValue() &&
           "Associated value mismatch!");
    return;
  case IRP_CALL_SITE_ARGUMENT: {
    assert((CBContext == nullptr) &&
           "'call site argument' position must not have CallBaseContext!");
    Use *U = getAsUsePtr();
    (void)U; // Silence unused variable warning.
    assert(U && "Expected use for a 'call site argument' position!");
    assert(isa<CallBase>(U->getUser()) &&
           "Expected call base user for a 'call site argument' position!");
    assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
           "Expected call base argument operand for a 'call site argument' "
           "position");
    assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
               unsigned(getCallSiteArgNo()) &&
           "Argument number mismatch!");
    assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
    return;
  }
  }
#endif
}

std::optional<Constant *>
Attributor::getAssumedConstant(const IRPosition &IRP,
                               const AbstractAttribute &AA,
                               bool &UsedAssumedInformation) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
    std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
    if (!SimplifiedV)
      return std::nullopt;
    if (isa_and_nonnull<Constant>(*SimplifiedV))
      return cast<Constant>(*SimplifiedV);
    return nullptr;
  }
  if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
    return C;
  SmallVector<AA::ValueAndContext> Values;
  if (getAssumedSimplifiedValues(IRP, &AA, Values,
                                 AA::ValueScope::Interprocedural,
                                 UsedAssumedInformation)) {
    if (Values.empty())
      return std::nullopt;
    if (auto *C = dyn_cast_or_null<Constant>(
            AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
      return C;
  }
  return nullptr;
}

std::optional<Value *> Attributor::getAssumedSimplified(
    const IRPosition &IRP, const AbstractAttribute *AA,
    bool &UsedAssumedInformation, AA::ValueScope S) {
  // First check all callbacks provided by outside AAs. If any of them returns
  // a non-null value that is different from the associated value, or
  // std::nullopt, we assume it's simplified.
  for (auto &CB : SimplificationCallbacks.lookup(IRP))
    return CB(IRP, AA, UsedAssumedInformation);

  SmallVector<AA::ValueAndContext> Values;
  if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
    return &IRP.getAssociatedValue();
  if (Values.empty())
    return std::nullopt;
  if (AA)
    if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
      return V;
  if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
      IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
    return nullptr;
  return &IRP.getAssociatedValue();
}

bool Attributor::getAssumedSimplifiedValues(
    const IRPosition &InitialIRP, const AbstractAttribute *AA,
    SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
    bool &UsedAssumedInformation, bool RecurseForSelectAndPHI) {
  SmallPtrSet<Value *, 8> Seen;
  SmallVector<IRPosition, 8> Worklist;
  Worklist.push_back(InitialIRP);
  while (!Worklist.empty()) {
    const IRPosition &IRP = Worklist.pop_back_val();

    // First check all callbacks provided by outside AAs. If any of them
    // returns a non-null value that is different from the associated value,
    // or std::nullopt, we assume it's simplified.
    int NV = Values.size();
    const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
    for (const auto &CB : SimplificationCBs) {
      std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
      if (!CBResult.has_value())
        continue;
      Value *V = *CBResult;
      if (!V)
        return false;
      if ((S & AA::ValueScope::Interprocedural) ||
          AA::isValidInScope(*V, IRP.getAnchorScope()))
        Values.push_back(AA::ValueAndContext{*V, nullptr});
      else
        return false;
    }
    if (SimplificationCBs.empty()) {
      // If no high-level/outside simplification occurred, use
      // AAPotentialValues.
      const auto *PotentialValuesAA =
          getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
      if (PotentialValuesAA &&
          PotentialValuesAA->getAssumedSimplifiedValues(*this, Values, S)) {
        UsedAssumedInformation |= !PotentialValuesAA->isAtFixpoint();
      } else if (IRP.getPositionKind() != IRPosition::IRP_RETURNED) {
        Values.push_back({IRP.getAssociatedValue(), IRP.getCtxI()});
      } else {
        // TODO: We could visit all returns and add the operands.
        return false;
      }
    }

    if (!RecurseForSelectAndPHI)
      break;

    for (int I = NV, E = Values.size(); I < E; ++I) {
      Value *V = Values[I].getValue();
      if (!isa<PHINode>(V) && !isa<SelectInst>(V))
        continue;
      if (!Seen.insert(V).second)
        continue;
      // Move the last element to this slot.
      Values[I] = Values[E - 1];
      // Eliminate the last slot, adjust the indices.
      Values.pop_back();
      --E;
      --I;
      // Add a new value (select or phi) to the worklist.
      Worklist.push_back(IRPosition::value(*V));
    }
  }
  return true;
}

std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
    std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
    bool &UsedAssumedInformation) {
  if (!V)
    return V;
  if (*V == nullptr || isa<Constant>(*V))
    return V;
  if (auto *Arg = dyn_cast<Argument>(*V))
    if (CB.getCalledOperand() == Arg->getParent() &&
        CB.arg_size() > Arg->getArgNo())
      if (!Arg->hasPointeeInMemoryValueAttr())
        return getAssumedSimplified(
            IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
            UsedAssumedInformation, AA::Intraprocedural);
  return nullptr;
}

Attributor::~Attributor() {
  // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
  // thus we cannot delete them. We can, and want to, destruct them though.
  for (auto &It : AAMap) {
    AbstractAttribute *AA = It.getSecond();
    AA->~AbstractAttribute();
  }
}

bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  const IRPosition &IRP = AA.getIRPosition();
  if (!Functions.count(IRP.getAnchorScope()))
    return false;
  return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
                       CheckBBLivenessOnly, DepClass);
}

bool Attributor::isAssumedDead(const Use &U,
                               const AbstractAttribute *QueryingAA,
                               const AAIsDead *FnLivenessAA,
                               bool &UsedAssumedInformation,
                               bool CheckBBLivenessOnly, DepClassTy DepClass) {
  if (!Configuration.UseLiveness)
    return false;
  Instruction *UserI = dyn_cast<Instruction>(U.getUser());
  if (!UserI)
    return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);

  if (auto *CB = dyn_cast<CallBase>(UserI)) {
    // For call site argument uses we can check if the argument is
    // unused/dead.
    if (CB->isArgOperand(&U)) {
      const IRPosition &CSArgPos =
          IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
      return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
                           UsedAssumedInformation, CheckBBLivenessOnly,
                           DepClass);
    }
  } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
    const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
    return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
    BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
    return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
                         UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
  } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
    if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
      const IRPosition IRP = IRPosition::inst(*SI);
      const AAIsDead *IsDeadAA =
          getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
      if (IsDeadAA && IsDeadAA->isRemovableStore()) {
        if (QueryingAA)
          recordDependence(*IsDeadAA, *QueryingAA, DepClass);
        if (!IsDeadAA->isKnown(AAIsDead::IS_REMOVABLE))
          UsedAssumedInformation = true;
        return true;
      }
    }
  }

  return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
                       UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
}
1601 bool Attributor::isAssumedDead(const Instruction &I,
1602 const AbstractAttribute *QueryingAA,
1603 const AAIsDead *FnLivenessAA,
1604 bool &UsedAssumedInformation,
1605 bool CheckBBLivenessOnly, DepClassTy DepClass,
1606 bool CheckForDeadStore) {
1607 if (!Configuration.UseLiveness)
1608 return false;
1609 const IRPosition::CallBaseContext *CBCtx =
1610 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1612 if (ManifestAddedBlocks.contains(I.getParent()))
1613 return false;
1615 const Function &F = *I.getFunction();
1616 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1617 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1618 QueryingAA, DepClassTy::NONE);
1620 // Don't use recursive reasoning.
1621 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1622 return false;
1624 // If we have a context instruction and a liveness AA we use it.
1625 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1626 : FnLivenessAA->isAssumedDead(&I)) {
1627 if (QueryingAA)
1628 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1629 if (!FnLivenessAA->isKnownDead(&I))
1630 UsedAssumedInformation = true;
1631 return true;
1634 if (CheckBBLivenessOnly)
1635 return false;
1637 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1638 const AAIsDead *IsDeadAA =
1639 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1641 // Don't use recursive reasoning.
1642 if (!IsDeadAA || QueryingAA == IsDeadAA)
1643 return false;
1645 if (IsDeadAA->isAssumedDead()) {
1646 if (QueryingAA)
1647 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1648 if (!IsDeadAA->isKnownDead())
1649 UsedAssumedInformation = true;
1650 return true;
1653 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA->isRemovableStore()) {
1654 if (QueryingAA)
1655 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1656 if (!IsDeadAA->isKnownDead())
1657 UsedAssumedInformation = true;
1658 return true;
1661 return false;
1664 bool Attributor::isAssumedDead(const IRPosition &IRP,
1665 const AbstractAttribute *QueryingAA,
1666 const AAIsDead *FnLivenessAA,
1667 bool &UsedAssumedInformation,
1668 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1669 if (!Configuration.UseLiveness)
1670 return false;
1671 // Don't check liveness for constants, e.g. functions, used as (floating)
1672 // values since the context instruction and the like are meaningless here.
1673 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1674 isa<Constant>(IRP.getAssociatedValue())) {
1675 return false;
1678 Instruction *CtxI = IRP.getCtxI();
1679 if (CtxI &&
1680 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1681 /* CheckBBLivenessOnly */ true,
1682 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1683 return true;
1685 if (CheckBBLivenessOnly)
1686 return false;
1688 // If we haven't succeeded we query the specific liveness info for the IRP.
1689 const AAIsDead *IsDeadAA;
1690 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1691 IsDeadAA = getOrCreateAAFor<AAIsDead>(
1692 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1693 QueryingAA, DepClassTy::NONE);
1694 else
1695 IsDeadAA = getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1697 // Don't use recursive reasoning.
1698 if (!IsDeadAA || QueryingAA == IsDeadAA)
1699 return false;
1701 if (IsDeadAA->isAssumedDead()) {
1702 if (QueryingAA)
1703 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1704 if (!IsDeadAA->isKnownDead())
1705 UsedAssumedInformation = true;
1706 return true;
1709 return false;
1712 bool Attributor::isAssumedDead(const BasicBlock &BB,
1713 const AbstractAttribute *QueryingAA,
1714 const AAIsDead *FnLivenessAA,
1715 DepClassTy DepClass) {
1716 if (!Configuration.UseLiveness)
1717 return false;
1718 const Function &F = *BB.getParent();
1719 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1720 FnLivenessAA = getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1721 QueryingAA, DepClassTy::NONE);
1723 // Don't use recursive reasoning.
1724 if (!FnLivenessAA || QueryingAA == FnLivenessAA)
1725 return false;
1727 if (FnLivenessAA->isAssumedDead(&BB)) {
1728 if (QueryingAA)
1729 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1730 return true;
1733 return false;
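// Illustrative sketch (assumed usage): querying basic-block liveness from an
// abstract attribute; `QueryingAA` stands in for the AA performing the query.
static bool exampleIsBlockDead(Attributor &A, const BasicBlock &BB,
                               const AbstractAttribute &QueryingAA) {
  return A.isAssumedDead(BB, &QueryingAA, /* FnLivenessAA */ nullptr,
                         DepClassTy::REQUIRED);
}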
1736 bool Attributor::checkForAllCallees(
1737 function_ref<bool(ArrayRef<const Function *>)> Pred,
1738 const AbstractAttribute &QueryingAA, const CallBase &CB) {
1739 if (const Function *Callee = dyn_cast<Function>(CB.getCalledOperand()))
1740 return Pred(Callee);
1742 const auto *CallEdgesAA = getAAFor<AACallEdges>(
1743 QueryingAA, IRPosition::callsite_function(CB), DepClassTy::OPTIONAL);
1744 if (!CallEdgesAA || CallEdgesAA->hasUnknownCallee())
1745 return false;
1747 const auto &Callees = CallEdgesAA->getOptimisticEdges();
1748 return Pred(Callees.getArrayRef());
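// Illustrative sketch (assumed usage): asking whether every potential callee
// of a (possibly indirect) call site carries the `nounwind` attribute.
static bool exampleCalleesAreNoUnwind(Attributor &A,
                                      const AbstractAttribute &QueryingAA,
                                      const CallBase &CB) {
  auto CalleePred = [](ArrayRef<const Function *> Callees) {
    return llvm::all_of(Callees, [](const Function *F) {
      return F->hasFnAttribute(Attribute::NoUnwind);
    });
  };
  return A.checkForAllCallees(CalleePred, QueryingAA, CB);
}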
1751 bool canMarkAsVisited(const User *Usr) {
1752 return isa<PHINode>(Usr) || !isa<Instruction>(Usr);
1755 bool Attributor::checkForAllUses(
1756 function_ref<bool(const Use &, bool &)> Pred,
1757 const AbstractAttribute &QueryingAA, const Value &V,
1758 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1759 bool IgnoreDroppableUses,
1760 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1762 // Check virtual uses first.
1763 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1764 if (!CB(*this, &QueryingAA))
1765 return false;
1767 // Check the trivial case first as it catches void values.
1768 if (V.use_empty())
1769 return true;
1771 const IRPosition &IRP = QueryingAA.getIRPosition();
1772 SmallVector<const Use *, 16> Worklist;
1773 SmallPtrSet<const Use *, 16> Visited;
1775 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1776 for (const Use &UU : V.uses()) {
1777 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1778 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1779 "rejected by the equivalence call back: "
1780 << *UU << "!\n");
1781 return false;
1784 Worklist.push_back(&UU);
1786 return true;
1789 AddUsers(V, /* OldUse */ nullptr);
1791 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1792 << " initial uses to check\n");
1794 const Function *ScopeFn = IRP.getAnchorScope();
1795 const auto *LivenessAA =
1796 ScopeFn ? getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1797 DepClassTy::NONE)
1798 : nullptr;
1800 while (!Worklist.empty()) {
1801 const Use *U = Worklist.pop_back_val();
1802 if (canMarkAsVisited(U->getUser()) && !Visited.insert(U).second)
1803 continue;
1804 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1805 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1806 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1807 << "\n";
1808 else
1809 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1810 << "\n";
1812 bool UsedAssumedInformation = false;
1813 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1814 CheckBBLivenessOnly, LivenessDepClass)) {
1815 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1816 dbgs() << "[Attributor] Dead use, skip!\n");
1817 continue;
1819 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1820 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1821 dbgs() << "[Attributor] Droppable user, skip!\n");
1822 continue;
1825 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1826 if (&SI->getOperandUse(0) == U) {
1827 if (!Visited.insert(U).second)
1828 continue;
1829 SmallSetVector<Value *, 4> PotentialCopies;
1830 if (AA::getPotentialCopiesOfStoredValue(
1831 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1832 /* OnlyExact */ true)) {
1833 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1834 dbgs()
1835 << "[Attributor] Value is stored, continue with "
1836 << PotentialCopies.size()
1837 << " potential copies instead!\n");
1838 for (Value *PotentialCopy : PotentialCopies)
1839 if (!AddUsers(*PotentialCopy, U))
1840 return false;
1841 continue;
1846 bool Follow = false;
1847 if (!Pred(*U, Follow))
1848 return false;
1849 if (!Follow)
1850 continue;
1852 User &Usr = *U->getUser();
1853 AddUsers(Usr, /* OldUse */ nullptr);
1856 return true;
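// Illustrative sketch (assumed usage, relying on the header defaults for the
// liveness flags): visiting all uses of a value, descending into users only
// when the predicate sets `Follow`.
static bool exampleAllUsesAreLoads(Attributor &A, const Value &V,
                                   const AbstractAttribute &QueryingAA) {
  auto UsePred = [](const Use &U, bool &Follow) {
    Follow = false; // Do not chase the users' users in this sketch.
    return isa<LoadInst>(U.getUser());
  };
  return A.checkForAllUses(UsePred, QueryingAA, V);
}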
1859 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1860 const AbstractAttribute &QueryingAA,
1861 bool RequireAllCallSites,
1862 bool &UsedAssumedInformation) {
1863 // We can try to determine information from
1864 // the call sites. However, this is only possible if all call sites are
1865 // known, which is the case if the function has internal (local) linkage.
1866 const IRPosition &IRP = QueryingAA.getIRPosition();
1867 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1868 if (!AssociatedFunction) {
1869 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1870 << "\n");
1871 return false;
1874 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1875 &QueryingAA, UsedAssumedInformation);
1878 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1879 const Function &Fn,
1880 bool RequireAllCallSites,
1881 const AbstractAttribute *QueryingAA,
1882 bool &UsedAssumedInformation,
1883 bool CheckPotentiallyDead) {
1884 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1885 LLVM_DEBUG(
1886 dbgs()
1887 << "[Attributor] Function " << Fn.getName()
1888 << " has no internal linkage, hence not all call sites are known\n");
1889 return false;
1891 // Check virtual uses first.
1892 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1893 if (!CB(*this, QueryingAA))
1894 return false;
1896 SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1897 for (unsigned u = 0; u < Uses.size(); ++u) {
1898 const Use &U = *Uses[u];
1899 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1900 if (auto *Fn = dyn_cast<Function>(U))
1901 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1902 << *U.getUser() << "\n";
1903 else
1904 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1905 << "\n";
1907 if (!CheckPotentiallyDead &&
1908 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1909 /* CheckBBLivenessOnly */ true)) {
1910 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1911 dbgs() << "[Attributor] Dead use, skip!\n");
1912 continue;
1914 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1915 if (CE->isCast() && CE->getType()->isPointerTy()) {
1916 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1917 dbgs() << "[Attributor] Use, is constant cast expression, add "
1918 << CE->getNumUses() << " uses of that expression instead!\n";
1920 for (const Use &CEU : CE->uses())
1921 Uses.push_back(&CEU);
1922 continue;
1926 AbstractCallSite ACS(&U);
1927 if (!ACS) {
1928 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1929 << " has non call site use " << *U.get() << " in "
1930 << *U.getUser() << "\n");
1931 // BlockAddress users are allowed.
1932 if (isa<BlockAddress>(U.getUser()))
1933 continue;
1934 return false;
1937 const Use *EffectiveUse =
1938 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1939 if (!ACS.isCallee(EffectiveUse)) {
1940 if (!RequireAllCallSites) {
1941 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1942 << " is not a call of " << Fn.getName()
1943 << ", skip use\n");
1944 continue;
1946 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1947 << " is an invalid use of " << Fn.getName() << "\n");
1948 return false;
1951 // Make sure the arguments that can be matched between the call site and the
1952 // callee agree on their type. It is unlikely they do not and it doesn't
1953 // make sense for all attributes to know/care about this.
1954 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1955 unsigned MinArgsParams =
1956 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1957 for (unsigned u = 0; u < MinArgsParams; ++u) {
1958 Value *CSArgOp = ACS.getCallArgOperand(u);
1959 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1960 LLVM_DEBUG(
1961 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1962 << u << "@" << Fn.getName() << ": "
1963 << *Fn.getArg(u)->getType() << " vs. "
1964 << *ACS.getCallArgOperand(u)->getType() << "]\n");
1965 return false;
1969 if (Pred(ACS))
1970 continue;
1972 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1973 << *ACS.getInstruction() << "\n");
1974 return false;
1977 return true;
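// Illustrative sketch (assumed usage): requiring that every known call site of
// `Fn` is a direct, non-callback call.
static bool exampleAllCallSitesDirect(Attributor &A, const Function &Fn,
                                      const AbstractAttribute &QueryingAA,
                                      bool &UsedAssumedInformation) {
  auto CallSitePred = [](AbstractCallSite ACS) {
    return !ACS.isCallbackCall();
  };
  return A.checkForAllCallSites(CallSitePred, Fn,
                                /* RequireAllCallSites */ true, &QueryingAA,
                                UsedAssumedInformation);
}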
1980 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1981 // TODO: Maintain a cache of Values that are
1982 // on the pathway from an Argument to an Instruction that would affect the
1983 // liveness/return state etc.
1984 return EnableCallSiteSpecific;
1987 bool Attributor::checkForAllReturnedValues(function_ref<bool(Value &)> Pred,
1988 const AbstractAttribute &QueryingAA,
1989 AA::ValueScope S,
1990 bool RecurseForSelectAndPHI) {
1992 const IRPosition &IRP = QueryingAA.getIRPosition();
1993 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1994 if (!AssociatedFunction)
1995 return false;
1997 bool UsedAssumedInformation = false;
1998 SmallVector<AA::ValueAndContext> Values;
1999 if (!getAssumedSimplifiedValues(
2000 IRPosition::returned(*AssociatedFunction), &QueryingAA, Values, S,
2001 UsedAssumedInformation, RecurseForSelectAndPHI))
2002 return false;
2004 return llvm::all_of(Values, [&](const AA::ValueAndContext &VAC) {
2005 return Pred(*VAC.getValue());
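// Illustrative sketch (assumed usage): checking that every (simplified) value
// returned by the associated function is a constant.
static bool exampleReturnsOnlyConstants(Attributor &A,
                                        const AbstractAttribute &QueryingAA) {
  auto RetValPred = [](Value &V) { return isa<Constant>(V); };
  return A.checkForAllReturnedValues(RetValPred, QueryingAA,
                                     AA::Intraprocedural,
                                     /* RecurseForSelectAndPHI */ true);
}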
2009 static bool checkForAllInstructionsImpl(
2010 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
2011 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
2012 const AAIsDead *LivenessAA, ArrayRef<unsigned> Opcodes,
2013 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
2014 bool CheckPotentiallyDead = false) {
2015 for (unsigned Opcode : Opcodes) {
2016 // Check if we have instructions with this opcode at all first.
2017 auto *Insts = OpcodeInstMap.lookup(Opcode);
2018 if (!Insts)
2019 continue;
2021 for (Instruction *I : *Insts) {
2022 // Skip dead instructions.
2023 if (A && !CheckPotentiallyDead &&
2024 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
2025 UsedAssumedInformation, CheckBBLivenessOnly)) {
2026 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2027 dbgs() << "[Attributor] Instruction " << *I
2028 << " is potentially dead, skip!\n";);
2029 continue;
2032 if (!Pred(*I))
2033 return false;
2036 return true;
2039 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2040 const Function *Fn,
2041 const AbstractAttribute *QueryingAA,
2042 ArrayRef<unsigned> Opcodes,
2043 bool &UsedAssumedInformation,
2044 bool CheckBBLivenessOnly,
2045 bool CheckPotentiallyDead) {
2046 // Since we need to provide instructions we have to have an exact definition.
2047 if (!Fn || Fn->isDeclaration())
2048 return false;
2050 const IRPosition &QueryIRP = IRPosition::function(*Fn);
2051 const auto *LivenessAA =
2052 CheckPotentiallyDead && QueryingAA
2053 ? (getAAFor<AAIsDead>(*QueryingAA, QueryIRP, DepClassTy::NONE))
2054 : nullptr;
2056 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2057 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, QueryingAA,
2058 LivenessAA, Opcodes, UsedAssumedInformation,
2059 CheckBBLivenessOnly, CheckPotentiallyDead))
2060 return false;
2062 return true;
2065 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
2066 const AbstractAttribute &QueryingAA,
2067 ArrayRef<unsigned> Opcodes,
2068 bool &UsedAssumedInformation,
2069 bool CheckBBLivenessOnly,
2070 bool CheckPotentiallyDead) {
2071 const IRPosition &IRP = QueryingAA.getIRPosition();
2072 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2073 return checkForAllInstructions(Pred, AssociatedFunction, &QueryingAA, Opcodes,
2074 UsedAssumedInformation, CheckBBLivenessOnly,
2075 CheckPotentiallyDead);
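// Illustrative sketch (assumed usage, relying on the header defaults for the
// liveness flags): walking all (live) call instructions of the associated
// function via the opcode-filtered traversal.
static bool exampleNoMustTailCalls(Attributor &A,
                                   const AbstractAttribute &QueryingAA,
                                   bool &UsedAssumedInformation) {
  auto InstPred = [](Instruction &I) {
    return !cast<CallInst>(I).isMustTailCall();
  };
  return A.checkForAllInstructions(InstPred, QueryingAA, {Instruction::Call},
                                   UsedAssumedInformation);
}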
2078 bool Attributor::checkForAllReadWriteInstructions(
2079 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
2080 bool &UsedAssumedInformation) {
2081 TimeTraceScope TS("checkForAllReadWriteInstructions");
2083 const Function *AssociatedFunction =
2084 QueryingAA.getIRPosition().getAssociatedFunction();
2085 if (!AssociatedFunction)
2086 return false;
2088 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2089 const auto *LivenessAA =
2090 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
2092 for (Instruction *I :
2093 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2094 // Skip dead instructions.
2095 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, LivenessAA,
2096 UsedAssumedInformation))
2097 continue;
2099 if (!Pred(*I))
2100 return false;
2103 return true;
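// Illustrative sketch (assumed usage): scanning all (live) memory accesses of
// the associated function, here rejecting volatile ones.
static bool exampleNoVolatileAccess(Attributor &A,
                                    AbstractAttribute &QueryingAA,
                                    bool &UsedAssumedInformation) {
  auto InstPred = [](Instruction &I) { return !I.isVolatile(); };
  return A.checkForAllReadWriteInstructions(InstPred, QueryingAA,
                                            UsedAssumedInformation);
}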
2106 void Attributor::runTillFixpoint() {
2107 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
2108 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2109 << DG.SyntheticRoot.Deps.size()
2110 << " abstract attributes.\n");
2112 // Now that all abstract attributes are collected and initialized we start
2113 // the abstract analysis.
2115 unsigned IterationCounter = 1;
2116 unsigned MaxIterations =
2117 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
2119 SmallVector<AbstractAttribute *, 32> ChangedAAs;
2120 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
2121 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
2123 do {
2124 // Remember the size to determine new attributes.
2125 size_t NumAAs = DG.SyntheticRoot.Deps.size();
2126 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2127 << ", Worklist size: " << Worklist.size() << "\n");
2129 // For invalid AAs we can fix dependent AAs that have a required dependence,
2130 // thereby folding long dependence chains in a single step without the need
2131 // to run updates.
2132 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
2133 AbstractAttribute *InvalidAA = InvalidAAs[u];
2135 // Check the dependences to fast track invalidation.
2136 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2137 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
2138 << " has " << InvalidAA->Deps.size()
2139 << " required & optional dependences\n");
2140 for (auto &DepIt : InvalidAA->Deps) {
2141 AbstractAttribute *DepAA = cast<AbstractAttribute>(DepIt.getPointer());
2142 if (DepIt.getInt() == unsigned(DepClassTy::OPTIONAL)) {
2143 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
2144 dbgs() << " - recompute: " << *DepAA);
2145 Worklist.insert(DepAA);
2146 continue;
2148 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
2149 << " - invalidate: " << *DepAA);
2150 DepAA->getState().indicatePessimisticFixpoint();
2151 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
2152 if (!DepAA->getState().isValidState())
2153 InvalidAAs.insert(DepAA);
2154 else
2155 ChangedAAs.push_back(DepAA);
2157 InvalidAA->Deps.clear();
2160 // Add all abstract attributes that are potentially dependent on one that
2161 // changed to the work list.
2162 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2163 for (auto &DepIt : ChangedAA->Deps)
2164 Worklist.insert(cast<AbstractAttribute>(DepIt.getPointer()));
2165 ChangedAA->Deps.clear();
2168 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2169 << ", Worklist+Dependent size: " << Worklist.size()
2170 << "\n");
2172 // Reset the changed and invalid set.
2173 ChangedAAs.clear();
2174 InvalidAAs.clear();
2176 // Update all abstract attributes in the work list and record the ones that
2177 // changed.
2178 for (AbstractAttribute *AA : Worklist) {
2179 const auto &AAState = AA->getState();
2180 if (!AAState.isAtFixpoint())
2181 if (updateAA(*AA) == ChangeStatus::CHANGED)
2182 ChangedAAs.push_back(AA);
2184 // Use the InvalidAAs vector to propagate invalid states quickly and
2185 // transitively without requiring updates.
2186 if (!AAState.isValidState())
2187 InvalidAAs.insert(AA);
2190 // Add attributes to the changed set if they have been created in the last
2191 // iteration.
2192 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2193 DG.SyntheticRoot.end());
2195 // Reset the work list and repopulate with the changed abstract attributes.
2196 // Note that dependent ones are added above.
2197 Worklist.clear();
2198 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2199 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2200 QueryAAsAwaitingUpdate.end());
2201 QueryAAsAwaitingUpdate.clear();
2203 } while (!Worklist.empty() && (IterationCounter++ < MaxIterations));
2205 if (IterationCounter > MaxIterations && !Functions.empty()) {
2206 auto Remark = [&](OptimizationRemarkMissed ORM) {
2207 return ORM << "Attributor did not reach a fixpoint after "
2208 << ore::NV("Iterations", MaxIterations) << " iterations.";
2210 Function *F = Functions.front();
2211 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2214 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2215 << IterationCounter << "/" << MaxIterations
2216 << " iterations\n");
2218 // Reset abstract attributes not settled in a sound fixpoint by now. This
2219 // happens when we stopped the fixpoint iteration early. Note that only the
2220 // ones marked as "changed" *and* the ones transitively depending on them
2221 // need to be reverted to a pessimistic state. Others might not be in a
2222 // fixpoint state but we can use the optimistic results for them anyway.
2223 SmallPtrSet<AbstractAttribute *, 32> Visited;
2224 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2225 AbstractAttribute *ChangedAA = ChangedAAs[u];
2226 if (!Visited.insert(ChangedAA).second)
2227 continue;
2229 AbstractState &State = ChangedAA->getState();
2230 if (!State.isAtFixpoint()) {
2231 State.indicatePessimisticFixpoint();
2233 NumAttributesTimedOut++;
2236 for (auto &DepIt : ChangedAA->Deps)
2237 ChangedAAs.push_back(cast<AbstractAttribute>(DepIt.getPointer()));
2238 ChangedAA->Deps.clear();
2241 LLVM_DEBUG({
2242 if (!Visited.empty())
2243 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2244 << " abstract attributes.\n";
2248 void Attributor::registerForUpdate(AbstractAttribute &AA) {
2249 assert(AA.isQueryAA() &&
2250 "Non-query AAs should not be required to register for updates!");
2251 QueryAAsAwaitingUpdate.insert(&AA);
2254 ChangeStatus Attributor::manifestAttributes() {
2255 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2256 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2258 unsigned NumManifested = 0;
2259 unsigned NumAtFixpoint = 0;
2260 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2261 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2262 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2263 AbstractState &State = AA->getState();
2265 // If there is not already a fixpoint reached, we can now take the
2266 // optimistic state. This is correct because we enforced a pessimistic one
2267 // on abstract attributes that were transitively dependent on a changed one
2268 // already above.
2269 if (!State.isAtFixpoint())
2270 State.indicateOptimisticFixpoint();
2272 // We must not manifest Attributes that use CallBase info.
2273 if (AA->hasCallBaseContext())
2274 continue;
2275 // If the state is invalid, we do not try to manifest it.
2276 if (!State.isValidState())
2277 continue;
2279 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2280 continue;
2282 // Skip dead code.
2283 bool UsedAssumedInformation = false;
2284 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2285 /* CheckBBLivenessOnly */ true))
2286 continue;
2287 // Consult the manifest debug counter, which allows skipping the
2288 // manifestation of individual AAs.
2289 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2290 continue;
2291 // Manifest the state and record if we changed the IR.
2292 ChangeStatus LocalChange = AA->manifest(*this);
2293 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2294 AA->trackStatistics();
2295 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2296 << "\n");
2298 ManifestChange = ManifestChange | LocalChange;
2300 NumAtFixpoint++;
2301 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2304 (void)NumManifested;
2305 (void)NumAtFixpoint;
2306 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2307 << " arguments while " << NumAtFixpoint
2308 << " were in a valid fixpoint state\n");
2310 NumAttributesManifested += NumManifested;
2311 NumAttributesValidFixpoint += NumAtFixpoint;
2313 (void)NumFinalAAs;
2314 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2315 auto DepIt = DG.SyntheticRoot.Deps.begin();
2316 for (unsigned u = 0; u < NumFinalAAs; ++u)
2317 ++DepIt;
2318 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size();
2319 ++u, ++DepIt) {
2320 errs() << "Unexpected abstract attribute: "
2321 << cast<AbstractAttribute>(DepIt->getPointer()) << " :: "
2322 << cast<AbstractAttribute>(DepIt->getPointer())
2323 ->getIRPosition()
2324 .getAssociatedValue()
2325 << "\n";
2327 llvm_unreachable("Expected the final number of abstract attributes to "
2328 "remain unchanged!");
2331 for (auto &It : AttrsMap) {
2332 AttributeList &AL = It.getSecond();
2333 const IRPosition &IRP =
2334 isa<Function>(It.getFirst())
2335 ? IRPosition::function(*cast<Function>(It.getFirst()))
2336 : IRPosition::callsite_function(*cast<CallBase>(It.getFirst()));
2337 IRP.setAttrList(AL);
2340 return ManifestChange;
2343 void Attributor::identifyDeadInternalFunctions() {
2344 // Early exit if we don't intend to delete functions.
2345 if (!Configuration.DeleteFns)
2346 return;
2348 // To avoid triggering an assertion in the lazy call graph we will not delete
2349 // any internal library functions. We should modify the assertion though and
2350 // allow internals to be deleted.
2351 const auto *TLI =
2352 isModulePass()
2353 ? nullptr
2354 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2355 LibFunc LF;
2357 // Identify dead internal functions and delete them. This happens outside
2358 // the other fixpoint analysis as we might treat potentially dead functions
2359 // as live to lower the number of iterations. If they happen to be dead, the
2360 // below fixpoint loop will identify and eliminate them.
2362 SmallVector<Function *, 8> InternalFns;
2363 for (Function *F : Functions)
2364 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2365 InternalFns.push_back(F);
2367 SmallPtrSet<Function *, 8> LiveInternalFns;
2368 bool FoundLiveInternal = true;
2369 while (FoundLiveInternal) {
2370 FoundLiveInternal = false;
2371 for (Function *&F : InternalFns) {
2372 if (!F)
2373 continue;
2375 bool UsedAssumedInformation = false;
2376 if (checkForAllCallSites(
2377 [&](AbstractCallSite ACS) {
2378 Function *Callee = ACS.getInstruction()->getFunction();
2379 return ToBeDeletedFunctions.count(Callee) ||
2380 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2381 !LiveInternalFns.count(Callee));
2383 *F, true, nullptr, UsedAssumedInformation)) {
2384 continue;
2387 LiveInternalFns.insert(F);
2388 F = nullptr;
2389 FoundLiveInternal = true;
2393 for (Function *F : InternalFns)
2394 if (F)
2395 ToBeDeletedFunctions.insert(F);
2398 ChangeStatus Attributor::cleanupIR() {
2399 TimeTraceScope TimeScope("Attributor::cleanupIR");
2400 // Delete stuff at the end to avoid invalid references and to ensure a nice order.
2401 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2402 << ToBeDeletedFunctions.size() << " functions and "
2403 << ToBeDeletedBlocks.size() << " blocks and "
2404 << ToBeDeletedInsts.size() << " instructions and "
2405 << ToBeChangedValues.size() << " values and "
2406 << ToBeChangedUses.size() << " uses. To insert "
2407 << ToBeChangedToUnreachableInsts.size()
2408 << " unreachables.\n"
2409 << "Preserve manifest added " << ManifestAddedBlocks.size()
2410 << " blocks\n");
2412 SmallVector<WeakTrackingVH, 32> DeadInsts;
2413 SmallVector<Instruction *, 32> TerminatorsToFold;
2415 auto ReplaceUse = [&](Use *U, Value *NewV) {
2416 Value *OldV = U->get();
2418 // If we plan to replace NewV we need to update it at this point.
2419 do {
2420 const auto &Entry = ToBeChangedValues.lookup(NewV);
2421 if (!get<0>(Entry))
2422 break;
2423 NewV = get<0>(Entry);
2424 } while (true);
2426 Instruction *I = dyn_cast<Instruction>(U->getUser());
2427 assert((!I || isRunOn(*I->getFunction())) &&
2428 "Cannot replace an instruction outside the current SCC!");
2430 // Do not replace uses in returns if the value is a must-tail call we will
2431 // not delete.
2432 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2433 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2434 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2435 return;
2436 // If we rewrite a return and the new value is not an argument, strip the
2437 // `returned` attribute as it is wrong now.
2438 if (!isa<Argument>(NewV))
2439 for (auto &Arg : RI->getFunction()->args())
2440 Arg.removeAttr(Attribute::Returned);
2443 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2444 << " instead of " << *OldV << "\n");
2445 U->set(NewV);
2447 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2448 CGModifiedFunctions.insert(I->getFunction());
2449 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2450 isInstructionTriviallyDead(I))
2451 DeadInsts.push_back(I);
2453 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2454 auto *CB = cast<CallBase>(U->getUser());
2455 if (CB->isArgOperand(U)) {
2456 unsigned Idx = CB->getArgOperandNo(U);
2457 CB->removeParamAttr(Idx, Attribute::NoUndef);
2458 auto *Callee = dyn_cast_if_present<Function>(CB->getCalledOperand());
2459 if (Callee && Callee->arg_size() > Idx)
2460 Callee->removeParamAttr(Idx, Attribute::NoUndef);
2463 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2464 Instruction *UserI = cast<Instruction>(U->getUser());
2465 if (isa<UndefValue>(NewV)) {
2466 ToBeChangedToUnreachableInsts.insert(UserI);
2467 } else {
2468 TerminatorsToFold.push_back(UserI);
2473 for (auto &It : ToBeChangedUses) {
2474 Use *U = It.first;
2475 Value *NewV = It.second;
2476 ReplaceUse(U, NewV);
2479 SmallVector<Use *, 4> Uses;
2480 for (auto &It : ToBeChangedValues) {
2481 Value *OldV = It.first;
2482 auto [NewV, Done] = It.second;
2483 Uses.clear();
2484 for (auto &U : OldV->uses())
2485 if (Done || !U.getUser()->isDroppable())
2486 Uses.push_back(&U);
2487 for (Use *U : Uses) {
2488 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2489 if (!isRunOn(*I->getFunction()))
2490 continue;
2491 ReplaceUse(U, NewV);
2495 for (const auto &V : InvokeWithDeadSuccessor)
2496 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2497 assert(isRunOn(*II->getFunction()) &&
2498 "Cannot replace an invoke outside the current SCC!");
2499 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2500 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2501 bool Invoke2CallAllowed =
2502 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2503 assert((UnwindBBIsDead || NormalBBIsDead) &&
2504 "Invoke does not have dead successors!");
2505 BasicBlock *BB = II->getParent();
2506 BasicBlock *NormalDestBB = II->getNormalDest();
2507 if (UnwindBBIsDead) {
2508 Instruction *NormalNextIP = &NormalDestBB->front();
2509 if (Invoke2CallAllowed) {
2510 changeToCall(II);
2511 NormalNextIP = BB->getTerminator();
2513 if (NormalBBIsDead)
2514 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2515 } else {
2516 assert(NormalBBIsDead && "Broken invariant!");
2517 if (!NormalDestBB->getUniquePredecessor())
2518 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2519 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2522 for (Instruction *I : TerminatorsToFold) {
2523 assert(isRunOn(*I->getFunction()) &&
2524 "Cannot replace a terminator outside the current SCC!");
2525 CGModifiedFunctions.insert(I->getFunction());
2526 ConstantFoldTerminator(I->getParent());
2528 for (const auto &V : ToBeChangedToUnreachableInsts)
2529 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2530 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2531 << "\n");
2532 assert(isRunOn(*I->getFunction()) &&
2533 "Cannot replace an instruction outside the current SCC!");
2534 CGModifiedFunctions.insert(I->getFunction());
2535 changeToUnreachable(I);
2538 for (const auto &V : ToBeDeletedInsts) {
2539 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2540 assert((!isa<CallBase>(I) || isa<IntrinsicInst>(I) ||
2541 isRunOn(*I->getFunction())) &&
2542 "Cannot delete an instruction outside the current SCC!");
2543 I->dropDroppableUses();
2544 CGModifiedFunctions.insert(I->getFunction());
2545 if (!I->getType()->isVoidTy())
2546 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2547 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2548 DeadInsts.push_back(I);
2549 else
2550 I->eraseFromParent();
2554 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2556 LLVM_DEBUG({
2557 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2558 for (auto &I : DeadInsts)
2559 if (I)
2560 dbgs() << " - " << *I << "\n";
2563 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2565 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2566 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2567 ToBeDeletedBBs.reserve(NumDeadBlocks);
2568 for (BasicBlock *BB : ToBeDeletedBlocks) {
2569 assert(isRunOn(*BB->getParent()) &&
2570 "Cannot delete a block outside the current SCC!");
2571 CGModifiedFunctions.insert(BB->getParent());
2572 // Do not delete BBs added during manifests of AAs.
2573 if (ManifestAddedBlocks.contains(BB))
2574 continue;
2575 ToBeDeletedBBs.push_back(BB);
2577 // We do not actually delete the blocks but squash them into a single
2578 // unreachable instruction; untangling branches that jump here is something
2579 // we need to do in a more generic way.
2580 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2583 identifyDeadInternalFunctions();
2585 // Rewrite the functions as requested during manifest.
2586 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2588 for (Function *Fn : CGModifiedFunctions)
2589 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2590 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2592 for (Function *Fn : ToBeDeletedFunctions) {
2593 if (!Functions.count(Fn))
2594 continue;
2595 Configuration.CGUpdater.removeFunction(*Fn);
2598 if (!ToBeChangedUses.empty())
2599 ManifestChange = ChangeStatus::CHANGED;
2601 if (!ToBeChangedToUnreachableInsts.empty())
2602 ManifestChange = ChangeStatus::CHANGED;
2604 if (!ToBeDeletedFunctions.empty())
2605 ManifestChange = ChangeStatus::CHANGED;
2607 if (!ToBeDeletedBlocks.empty())
2608 ManifestChange = ChangeStatus::CHANGED;
2610 if (!ToBeDeletedInsts.empty())
2611 ManifestChange = ChangeStatus::CHANGED;
2613 if (!InvokeWithDeadSuccessor.empty())
2614 ManifestChange = ChangeStatus::CHANGED;
2616 if (!DeadInsts.empty())
2617 ManifestChange = ChangeStatus::CHANGED;
2619 NumFnDeleted += ToBeDeletedFunctions.size();
2621 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2622 << " functions after manifest.\n");
2624 #ifdef EXPENSIVE_CHECKS
2625 for (Function *F : Functions) {
2626 if (ToBeDeletedFunctions.count(F))
2627 continue;
2628 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2630 #endif
2632 return ManifestChange;
2635 ChangeStatus Attributor::run() {
2636 TimeTraceScope TimeScope("Attributor::run");
2637 AttributorCallGraph ACallGraph(*this);
2639 if (PrintCallGraph)
2640 ACallGraph.populateAll();
2642 Phase = AttributorPhase::UPDATE;
2643 runTillFixpoint();
2645 // Dump, view, or print the dependence graph on demand.
2646 if (DumpDepGraph)
2647 DG.dumpGraph();
2649 if (ViewDepGraph)
2650 DG.viewGraph();
2652 if (PrintDependencies)
2653 DG.print();
2655 Phase = AttributorPhase::MANIFEST;
2656 ChangeStatus ManifestChange = manifestAttributes();
2658 Phase = AttributorPhase::CLEANUP;
2659 ChangeStatus CleanupChange = cleanupIR();
2661 if (PrintCallGraph)
2662 ACallGraph.print();
2664 return ManifestChange | CleanupChange;
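// Illustrative sketch (assumed semantics): ChangeStatus combines like a
// lattice join, so a single CHANGED operand makes the overall result CHANGED,
// mirroring the `ManifestChange | CleanupChange` combination above.
static ChangeStatus exampleCombine(ChangeStatus LHS, ChangeStatus RHS) {
  return LHS | RHS; // UNCHANGED | CHANGED == CHANGED
}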
2667 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2668 TimeTraceScope TimeScope("updateAA", [&]() {
2669 return AA.getName() + std::to_string(AA.getIRPosition().getPositionKind());
2671 assert(Phase == AttributorPhase::UPDATE &&
2672 "We can update AA only in the update stage!");
2674 // Use a new dependence vector for this update.
2675 DependenceVector DV;
2676 DependenceStack.push_back(&DV);
2678 auto &AAState = AA.getState();
2679 ChangeStatus CS = ChangeStatus::UNCHANGED;
2680 bool UsedAssumedInformation = false;
2681 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2682 /* CheckBBLivenessOnly */ true))
2683 CS = AA.update(*this);
2685 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2686 // If the AA did not rely on outside information but changed, we run it
2687 // again to see if it found a fixpoint. Most AAs do but we don't require
2688 // them to. Hence, it might take the AA multiple iterations to get to a
2689 // fixpoint even if it does not rely on outside information, which is fine.
2690 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2691 if (CS == ChangeStatus::CHANGED)
2692 RerunCS = AA.update(*this);
2694 // If the attribute did not change during the run or rerun, and it still did
2695 // not query any non-fixed information, the state will not change and we can
2696 // indicate that right at this point.
2697 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2698 AAState.indicateOptimisticFixpoint();
2701 if (!AAState.isAtFixpoint())
2702 rememberDependences();
2704 // Verify the stack was used properly, that is, we pop the dependence vector we
2705 // put there earlier.
2706 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2707 (void)PoppedDV;
2708 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2710 return CS;
2713 void Attributor::createShallowWrapper(Function &F) {
2714 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2716 Module &M = *F.getParent();
2717 LLVMContext &Ctx = M.getContext();
2718 FunctionType *FnTy = F.getFunctionType();
2720 Function *Wrapper =
2721 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2722 F.setName(""); // Make the wrapped function anonymous.
2723 M.getFunctionList().insert(F.getIterator(), Wrapper);
2724 // Flag whether the function is using new-debug-info or not.
2725 Wrapper->IsNewDbgInfoFormat = M.IsNewDbgInfoFormat;
2727 F.setLinkage(GlobalValue::InternalLinkage);
2729 F.replaceAllUsesWith(Wrapper);
2730 assert(F.use_empty() && "Uses remained after wrapper was created!");
2732 // Move the COMDAT section to the wrapper.
2733 // TODO: Check if we need to keep it for F as well.
2734 Wrapper->setComdat(F.getComdat());
2735 F.setComdat(nullptr);
2737 // Copy all metadata and attributes but keep them on F as well.
2738 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2739 F.getAllMetadata(MDs);
2740 for (auto MDIt : MDs)
2741 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2742 Wrapper->setAttributes(F.getAttributes());
2744 // Create the call in the wrapper.
2745 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2747 SmallVector<Value *, 8> Args;
2748 Argument *FArgIt = F.arg_begin();
2749 for (Argument &Arg : Wrapper->args()) {
2750 Args.push_back(&Arg);
2751 Arg.setName((FArgIt++)->getName());
2754 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2755 CI->setTailCall(true);
2756 CI->addFnAttr(Attribute::NoInline);
2757 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2759 NumFnShallowWrappersCreated++;
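// Illustrative sketch (assumed IR, function names hypothetical): after
// wrapping, the original function becomes internal and anonymous while the
// wrapper takes over its name and linkage:
//
//   define internal i32 @0(i32 %x) { ... } ; original body, now anonymous
//   define i32 @foo(i32 %x) {              ; wrapper with the original name
//   entry:
//     %r = tail call i32 @0(i32 %x)        ; marked noinline by the code above
//     ret i32 %r
//   }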
2762 bool Attributor::isInternalizable(Function &F) {
2763 if (F.isDeclaration() || F.hasLocalLinkage() ||
2764 GlobalValue::isInterposableLinkage(F.getLinkage()))
2765 return false;
2766 return true;
2769 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2770 if (!AllowDeepWrapper && !Force)
2771 return nullptr;
2772 if (!isInternalizable(F))
2773 return nullptr;
2775 SmallPtrSet<Function *, 2> FnSet = {&F};
2776 DenseMap<Function *, Function *> InternalizedFns;
2777 internalizeFunctions(FnSet, InternalizedFns);
2779 return InternalizedFns[&F];
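// Illustrative sketch (assumed usage): force-internalizing one function and
// using the private copy when one was created.
static Function *exampleInternalize(Attributor &A, Function &F) {
  if (Function *Copy = A.internalizeFunction(F, /* Force */ true))
    return Copy; // "<name>.internalized", private linkage, DSO-local.
  return &F; // Declarations and interposable functions stay untouched.
}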
2782 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2783 DenseMap<Function *, Function *> &FnMap) {
2784 for (Function *F : FnSet)
2785 if (!Attributor::isInternalizable(*F))
2786 return false;
2788 FnMap.clear();
2789 // Generate the internalized version of each function.
2790 for (Function *F : FnSet) {
2791 Module &M = *F->getParent();
2792 FunctionType *FnTy = F->getFunctionType();
2794 // Create a copy of the current function
2795 Function *Copied =
2796 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2797 F->getName() + ".internalized");
2798 ValueToValueMapTy VMap;
2799 auto *NewFArgIt = Copied->arg_begin();
2800 for (auto &Arg : F->args()) {
2801 auto ArgName = Arg.getName();
2802 NewFArgIt->setName(ArgName);
2803 VMap[&Arg] = &(*NewFArgIt++);
2805 SmallVector<ReturnInst *, 8> Returns;
2806 // Flag whether the function is using new-debug-info or not.
2807 Copied->IsNewDbgInfoFormat = F->IsNewDbgInfoFormat;
2809 // Copy the body of the original function to the new one
2810 CloneFunctionInto(Copied, F, VMap,
2811 CloneFunctionChangeType::LocalChangesOnly, Returns);
2813 // Set the linkage and visibility late as CloneFunctionInto has some
2814 // implicit requirements.
2815 Copied->setVisibility(GlobalValue::DefaultVisibility);
2816 Copied->setLinkage(GlobalValue::PrivateLinkage);
2818 // Copy metadata
2819 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2820 F->getAllMetadata(MDs);
2821 for (auto MDIt : MDs)
2822 if (!Copied->hasMetadata())
2823 Copied->addMetadata(MDIt.first, *MDIt.second);
2825 M.getFunctionList().insert(F->getIterator(), Copied);
2826 Copied->setDSOLocal(true);
2827 FnMap[F] = Copied;
2830 // Replace all uses of the old function with the new internalized function
2831 // unless the caller is a function that was just internalized.
2832 for (Function *F : FnSet) {
2833 auto &InternalizedFn = FnMap[F];
2834 auto IsNotInternalized = [&](Use &U) -> bool {
2835 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2836 return !FnMap.lookup(CB->getCaller());
2837 return false;
2839 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2842 return true;
2845 bool Attributor::isValidFunctionSignatureRewrite(
2846 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2848 if (!Configuration.RewriteSignatures)
2849 return false;
2851 Function *Fn = Arg.getParent();
2852 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2853 // Forbid call sites that cast the function return type. If we need to
2854 // rewrite these functions we need to re-create a cast for the new call site
2855 // (if the old had uses).
2856 if (!ACS.getCalledFunction() ||
2857 ACS.getInstruction()->getType() !=
2858 ACS.getCalledFunction()->getReturnType())
2859 return false;
2860 if (cast<CallBase>(ACS.getInstruction())->getCalledOperand()->getType() !=
2861 Fn->getType())
2862 return false;
2863 if (ACS.getNumArgOperands() != Fn->arg_size())
2864 return false;
2865 // Forbid must-tail calls for now.
2866 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2869 // Avoid var-arg functions for now.
2870 if (Fn->isVarArg()) {
2871 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2872 return false;
2875 // Avoid functions with complicated argument passing semantics.
2876 AttributeList FnAttributeList = Fn->getAttributes();
2877 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2878 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2879 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2880 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2881 LLVM_DEBUG(
2882 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2883 return false;
2886 // Avoid callbacks for now.
2887 bool UsedAssumedInformation = false;
2888 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2889 UsedAssumedInformation,
2890 /* CheckPotentiallyDead */ true)) {
2891 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2892 return false;
2895 auto InstPred = [](Instruction &I) {
2896 if (auto *CI = dyn_cast<CallInst>(&I))
2897 return !CI->isMustTailCall();
2898 return true;
2901 // Forbid must-tail calls for now.
2902 // TODO:
2903 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2904 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2905 nullptr, {Instruction::Call},
2906 UsedAssumedInformation)) {
2907 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2908 return false;
2911 return true;
2914 bool Attributor::registerFunctionSignatureRewrite(
2915 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2916 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2917 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2918 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2919 << Arg.getParent()->getName() << " with "
2920 << ReplacementTypes.size() << " replacements\n");
2921 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2922 "Cannot register an invalid rewrite");
2924 Function *Fn = Arg.getParent();
2925 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2926 ArgumentReplacementMap[Fn];
2927 if (ARIs.empty())
2928 ARIs.resize(Fn->arg_size());
2930 // If we already have a replacement with at most as many new arguments,
2931 // ignore this request.
2932 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2933 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2934 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2935 return false;
2938 // If we have a replacement already but we like the new one better, delete
2939 // the old.
2940 ARI.reset();
2942 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2943 << Arg.getParent()->getName() << " with "
2944 << ReplacementTypes.size() << " replacements\n");
2946 // Remember the replacement.
2947 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2948 std::move(CalleeRepairCB),
2949 std::move(ACSRepairCB)));
2951 return true;
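// Illustrative sketch (assumed usage and callback types): registering a
// rewrite that drops `Arg` entirely (no replacement types). The no-op repair
// callbacks are placeholders a real client would implement.
static bool exampleDropArgument(Attributor &A, Argument &Arg) {
  if (!A.isValidFunctionSignatureRewrite(Arg, /* ReplacementTypes */ {}))
    return false;
  return A.registerFunctionSignatureRewrite(
      Arg, /* ReplacementTypes */ {},
      /* CalleeRepairCB */ [](auto &&...) {},
      /* ACSRepairCB */ [](auto &&...) {});
}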
2954 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2955 bool Result = true;
2956 #ifndef NDEBUG
2957 if (SeedAllowList.size() != 0)
2958 Result = llvm::is_contained(SeedAllowList, AA.getName());
2959 Function *Fn = AA.getAnchorScope();
2960 if (FunctionSeedAllowList.size() != 0 && Fn)
2961 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2962 #endif
2963 return Result;
2966 ChangeStatus Attributor::rewriteFunctionSignatures(
2967 SmallSetVector<Function *, 8> &ModifiedFns) {
2968 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2970 for (auto &It : ArgumentReplacementMap) {
2971 Function *OldFn = It.getFirst();
2973 // Deleted functions do not require rewrites.
2974 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2975 continue;
2977 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2978 It.getSecond();
2979 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2981 SmallVector<Type *, 16> NewArgumentTypes;
2982 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2984 // Collect replacement argument types and copy over existing attributes.
2985 AttributeList OldFnAttributeList = OldFn->getAttributes();
2986 for (Argument &Arg : OldFn->args()) {
2987 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2988 ARIs[Arg.getArgNo()]) {
2989 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2990 ARI->ReplacementTypes.end());
2991 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2992 AttributeSet());
2993 } else {
2994 NewArgumentTypes.push_back(Arg.getType());
2995 NewArgumentAttributes.push_back(
2996 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
3000 uint64_t LargestVectorWidth = 0;
3001 for (auto *I : NewArgumentTypes)
3002 if (auto *VT = dyn_cast<llvm::VectorType>(I))
3003 LargestVectorWidth =
3004 std::max(LargestVectorWidth,
3005 VT->getPrimitiveSizeInBits().getKnownMinValue());
3007 FunctionType *OldFnTy = OldFn->getFunctionType();
3008 Type *RetTy = OldFnTy->getReturnType();
3010 // Construct the new function type using the new arguments types.
3011 FunctionType *NewFnTy =
3012 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
3014 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
3015 << "' from " << *OldFn->getFunctionType() << " to "
3016 << *NewFnTy << "\n");
3018 // Create the new function body and insert it into the module.
3019 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
3020 OldFn->getAddressSpace(), "");
3021 Functions.insert(NewFn);
3022 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
3023 NewFn->takeName(OldFn);
3024 NewFn->copyAttributesFrom(OldFn);
3025 // Flag whether the function is using new-debug-info or not.
3026 NewFn->IsNewDbgInfoFormat = OldFn->IsNewDbgInfoFormat;
3028 // Patch the pointer to LLVM function in debug info descriptor.
3029 NewFn->setSubprogram(OldFn->getSubprogram());
3030 OldFn->setSubprogram(nullptr);
3032 // Recompute the parameter attributes list based on the new arguments for
3033 // the function.
3034 LLVMContext &Ctx = OldFn->getContext();
3035 NewFn->setAttributes(AttributeList::get(
3036 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
3037 NewArgumentAttributes));
3038 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
3040 // Remove argmem from the memory effects if we have no more pointer
3041 // arguments, or they are readnone.
3042 MemoryEffects ME = NewFn->getMemoryEffects();
3043 int ArgNo = -1;
3044 if (ME.doesAccessArgPointees() && all_of(NewArgumentTypes, [&](Type *T) {
3045 ++ArgNo;
3046 return !T->isPtrOrPtrVectorTy() ||
3047 NewFn->hasParamAttribute(ArgNo, Attribute::ReadNone);
3048 })) {
3049 NewFn->setMemoryEffects(ME - MemoryEffects::argMemOnly());
3052 // Since we have now created the new function, splice the body of the old
3053 // function right into the new function, leaving the old rotting hulk of the
3054 // function empty.
3055 NewFn->splice(NewFn->begin(), OldFn);
3057 // Fixup block addresses to reference new function.
3058 SmallVector<BlockAddress *, 8u> BlockAddresses;
3059 for (User *U : OldFn->users())
3060 if (auto *BA = dyn_cast<BlockAddress>(U))
3061 BlockAddresses.push_back(BA);
3062 for (auto *BA : BlockAddresses)
3063 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
3065 // Set of all "call-like" instructions that invoke the old function mapped
3066 // to their new replacements.
3067 SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
3069 // Callback to create a new "call-like" instruction for a given one.
3070 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
3071 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
3072 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
3074 // Collect the new argument operands for the replacement call site.
3075 SmallVector<Value *, 16> NewArgOperands;
3076 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
3077 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
3078 unsigned NewFirstArgNum = NewArgOperands.size();
3079 (void)NewFirstArgNum; // only used inside assert.
3080 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3081 ARIs[OldArgNum]) {
3082 if (ARI->ACSRepairCB)
3083 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
3084 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
3085 NewArgOperands.size() &&
3086 "ACS repair callback did not provide as many operand as new "
3087 "types were registered!");
3088 // TODO: Expose the attribute set to the ACS repair callback.
3089 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
3090 AttributeSet());
3091 } else {
3092 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
3093 NewArgOperandAttributes.push_back(
3094 OldCallAttributeList.getParamAttrs(OldArgNum));
3098 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
3099 "Mismatch # argument operands vs. # argument operand attributes!");
3100 assert(NewArgOperands.size() == NewFn->arg_size() &&
3101 "Mismatch # argument operands vs. # function arguments!");
3103 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
3104 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
3106 // Create a new call or invoke instruction to replace the old one.
3107 CallBase *NewCB;
3108 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
3109 NewCB = InvokeInst::Create(NewFn, II->getNormalDest(),
3110 II->getUnwindDest(), NewArgOperands,
3111 OperandBundleDefs, "", OldCB->getIterator());
3112 } else {
3113 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
3114 "", OldCB->getIterator());
3115 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
3116 NewCB = NewCI;
3119 // Copy over various properties and the new attributes.
3120 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
3121 NewCB->setCallingConv(OldCB->getCallingConv());
3122 NewCB->takeName(OldCB);
3123 NewCB->setAttributes(AttributeList::get(
3124 Ctx, OldCallAttributeList.getFnAttrs(),
3125 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
3127 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
3128 LargestVectorWidth);
3130 CallSitePairs.push_back({OldCB, NewCB});
3131 return true;
3134 // Use the CallSiteReplacementCreator to create replacement call sites.
3135 bool UsedAssumedInformation = false;
3136 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
3137 true, nullptr, UsedAssumedInformation,
3138 /* CheckPotentiallyDead */ true);
3139 (void)Success;
3140 assert(Success && "Assumed call site replacement to succeed!");
3142 // Rewire the arguments.
3143 Argument *OldFnArgIt = OldFn->arg_begin();
3144 Argument *NewFnArgIt = NewFn->arg_begin();
3145 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
3146 ++OldArgNum, ++OldFnArgIt) {
3147 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
3148 ARIs[OldArgNum]) {
3149 if (ARI->CalleeRepairCB)
3150 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
3151 if (ARI->ReplacementTypes.empty())
3152 OldFnArgIt->replaceAllUsesWith(
3153 PoisonValue::get(OldFnArgIt->getType()));
3154 NewFnArgIt += ARI->ReplacementTypes.size();
3155 } else {
3156 NewFnArgIt->takeName(&*OldFnArgIt);
3157 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
3158 ++NewFnArgIt;
3162 // Eliminate the instructions *after* we visited all of them.
3163 for (auto &CallSitePair : CallSitePairs) {
3164 CallBase &OldCB = *CallSitePair.first;
3165 CallBase &NewCB = *CallSitePair.second;
3166 assert(OldCB.getType() == NewCB.getType() &&
3167 "Cannot handle call sites with different types!");
3168 ModifiedFns.insert(OldCB.getFunction());
3169 OldCB.replaceAllUsesWith(&NewCB);
3170 OldCB.eraseFromParent();
3173 // Replace the function in the call graph (if any).
3174 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
3176 // If the old function was modified and needed to be reanalyzed, the new one
3177 // does now.
3178 if (ModifiedFns.remove(OldFn))
3179 ModifiedFns.insert(NewFn);
3181 Changed = ChangeStatus::CHANGED;
3184 return Changed;
3187 void InformationCache::initializeInformationCache(const Function &CF,
3188 FunctionInfo &FI) {
3189 // As we do not modify the function here we can remove the const
3190 // without breaking implicit assumptions. At the end of the day, we could
3191 // initialize the cache eagerly which would look the same to the users.
3192 Function &F = const_cast<Function &>(CF);
3194 // Walk all instructions to find interesting instructions that might be
3195 // queried by abstract attributes during their initialization or update.
3196 // This has to happen before we create attributes.
3198 DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3200 // Add \p V to the assume uses map, which tracks the number of uses outside
3201 // of "visited" assumes. If no outside uses are left, the value is added to
3202 // the assume-only-use vector.
3203 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3204 SmallVector<const Instruction *> Worklist;
3205 if (auto *I = dyn_cast<Instruction>(&V))
3206 Worklist.push_back(I);
3207 while (!Worklist.empty()) {
3208 const Instruction *I = Worklist.pop_back_val();
3209 std::optional<short> &NumUses = AssumeUsesMap[I];
3210 if (!NumUses)
3211 NumUses = I->getNumUses();
3212 NumUses = *NumUses - /* this assume */ 1;
3213 if (*NumUses != 0)
3214 continue;
3215 AssumeOnlyValues.insert(I);
3216 for (const Value *Op : I->operands())
3217 if (auto *OpI = dyn_cast<Instruction>(Op))
3218 Worklist.push_back(OpI);
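// A small worked example of the propagation above (IR names are
// illustrative, not from a real test): given
//
//   %a = add i32 %x, 1
//   %c = icmp eq i32 %a, 0
//   call void @llvm.assume(i1 %c)
//
// visiting the assume discounts the last outside use of %c, so %c is added
// to AssumeOnlyValues and its operand %a is pushed onto the worklist; since
// %a has no other users it becomes assume-only as well. %x is not an
// instruction and is therefore never enqueued.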
3222 for (Instruction &I : instructions(&F)) {
3223 bool IsInterestingOpcode = false;
3225 // To allow easy access to all instructions in a function with a given
3226 // opcode, we store them in the InfoCache. As not all opcodes are
3227 // interesting to concrete attributes, we only cache the ones identified as
3228 // interesting in the switch below.
3229 // Note: There are no concrete attributes yet, so this is initially empty.
3230 switch (I.getOpcode()) {
3231 default:
3232 assert(!isa<CallBase>(&I) &&
3233 "New call base instruction type needs to be known in the "
3234 "Attributor.");
3235 break;
3236 case Instruction::Call:
3237 // Calls are interesting on their own, additionally:
3238 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3239 // For `must-tail` calls we remember the caller and callee.
3240 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3241 AssumeOnlyValues.insert(Assume);
3242 fillMapFromAssume(*Assume, KnowledgeMap);
3243 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3244 } else if (cast<CallInst>(I).isMustTailCall()) {
3245 FI.ContainsMustTailCall = true;
3246 if (auto *Callee = dyn_cast_if_present<Function>(
3247 cast<CallInst>(I).getCalledOperand()))
3248 getFunctionInfo(*Callee).CalledViaMustTail = true;
3250 [[fallthrough]];
3251 case Instruction::CallBr:
3252 case Instruction::Invoke:
3253 case Instruction::CleanupRet:
3254 case Instruction::CatchSwitch:
3255 case Instruction::AtomicRMW:
3256 case Instruction::AtomicCmpXchg:
3257 case Instruction::Br:
3258 case Instruction::Resume:
3259 case Instruction::Ret:
3260 case Instruction::Load:
3261 // The alignment of a pointer is interesting for loads.
3262 case Instruction::Store:
3263 // The alignment of a pointer is interesting for stores.
3264 case Instruction::Alloca:
3265 case Instruction::AddrSpaceCast:
3266 IsInterestingOpcode = true;
3268 if (IsInterestingOpcode) {
3269 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3270 if (!Insts)
3271 Insts = new (Allocator) InstructionVectorTy();
3272 Insts->push_back(&I);
3274 if (I.mayReadOrWriteMemory())
3275 FI.RWInsts.push_back(&I);
3278 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3279 isInlineViable(F).isSuccess())
3280 InlineableFunctions.insert(&F);
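// Retrieval sketch (hedged): the cached instructions are later consumed via
// the per-function opcode map, e.g.,
//
//   auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
//
// which checkForAllInstructionsImpl iterates for a given opcode filter such
// as {Instruction::Load, Instruction::Store}, as done further down in
// identifyDefaultAbstractAttributes.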
3283 InformationCache::FunctionInfo::~FunctionInfo() {
3284 // The instruction vectors are allocated using a BumpPtrAllocator; we need
3285 // to destroy them manually.
3286 for (auto &It : OpcodeInstMap)
3287 It.getSecond()->~InstructionVectorTy();
3290 const ArrayRef<Function *>
3291 InformationCache::getIndirectlyCallableFunctions(Attributor &A) const {
3292 assert(A.isClosedWorldModule() && "Cannot see all indirect callees!");
3293 return IndirectlyCallableFunctions;
3296 std::optional<unsigned> InformationCache::getFlatAddressSpace() const {
3297 if (TargetTriple.isAMDGPU() || TargetTriple.isNVPTX())
3298 return 0;
3299 return std::nullopt;
3302 void Attributor::recordDependence(const AbstractAttribute &FromAA,
3303 const AbstractAttribute &ToAA,
3304 DepClassTy DepClass) {
3305 if (DepClass == DepClassTy::NONE)
3306 return;
3307 // If we are outside of an update, i.e., before the actual fixpoint
3308 // iteration started (= when we create AAs), we do not track dependences
3309 // because we will put all AAs into the initial worklist anyway.
3310 if (DependenceStack.empty())
3311 return;
3312 if (FromAA.getState().isAtFixpoint())
3313 return;
3314 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
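// In other words (a sketch of the contract documented in Attributor.h):
// ToAA is the attribute currently being updated and FromAA the one it
// queried; the pair is buffered on the current dependence stack frame and
// rememberDependences() below moves it into FromAA's dependent list so that
// ToAA is re-enqueued whenever FromAA changes. If FromAA is already at a
// fixpoint it can never change, so recording the dependence is pointless.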
3317 void Attributor::rememberDependences() {
3318 assert(!DependenceStack.empty() && "No dependences to remember!");
3320 for (DepInfo &DI : *DependenceStack.back()) {
3321 assert((DI.DepClass == DepClassTy::REQUIRED ||
3322 DI.DepClass == DepClassTy::OPTIONAL) &&
3323 "Expected required or optional dependence (1 bit)!");
3324 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3325 DepAAs.insert(AbstractAttribute::DepTy(
3326 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3330 template <Attribute::AttrKind AK, typename AAType>
3331 void Attributor::checkAndQueryIRAttr(const IRPosition &IRP,
3332 AttributeSet Attrs) {
3333 bool IsKnown;
3334 if (!Attrs.hasAttribute(AK))
3335 if (!Configuration.Allowed || Configuration.Allowed->count(&AAType::ID))
3336 if (!AA::hasAssumedIRAttr<AK>(*this, nullptr, IRP, DepClassTy::NONE,
3337 IsKnown))
3338 getOrCreateAAFor<AAType>(IRP);
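// Illustrative use (mirroring the calls below): for a function position
// FPos with attribute set FnAttrs,
//
//   checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
//
// creates an AANoFree AA only if `nofree` is not already present, AANoFree
// is not filtered out by Configuration.Allowed, and the attribute is not
// already assumed to hold.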
3341 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3342 if (!VisitedFunctions.insert(&F).second)
3343 return;
3344 if (F.isDeclaration())
3345 return;
3347 // In non-module runs we need to look at the call sites of a function to
3348 // determine if it is part of a must-tail call edge. This will influence what
3349 // attributes we can derive.
3350 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3351 if (!isModulePass() && !FI.CalledViaMustTail) {
3352 for (const Use &U : F.uses())
3353 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3354 if (CB->isCallee(&U) && CB->isMustTailCall())
3355 FI.CalledViaMustTail = true;
3358 IRPosition FPos = IRPosition::function(F);
3359 bool IsIPOAmendable = isFunctionIPOAmendable(F);
3360 auto Attrs = F.getAttributes();
3361 auto FnAttrs = Attrs.getFnAttrs();
3363 // Check for dead BasicBlocks in every function.
3364 // We need dead instruction detection because we do not want to deal with
3365 // broken IR in which SSA rules do not apply.
3366 getOrCreateAAFor<AAIsDead>(FPos);
3368 // Every function might contain instructions that cause "undefined
3369 // behavior".
3370 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3372 // Every function might be applicable for Heap-To-Stack conversion.
3373 if (EnableHeapToStack)
3374 getOrCreateAAFor<AAHeapToStack>(FPos);
3376 // Every function might be "must-progress".
3377 checkAndQueryIRAttr<Attribute::MustProgress, AAMustProgress>(FPos, FnAttrs);
3379 // Every function might be "no-free".
3380 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(FPos, FnAttrs);
3382 // Every function might be "will-return".
3383 checkAndQueryIRAttr<Attribute::WillReturn, AAWillReturn>(FPos, FnAttrs);
3385 // Every function might be marked "nosync".
3386 checkAndQueryIRAttr<Attribute::NoSync, AANoSync>(FPos, FnAttrs);
3388 // Everything that is visible from the outside (= function, argument, and
3389 // return positions) cannot be changed if the function is not IPO amendable.
3390 // We can, however, analyze the code inside.
3391 if (IsIPOAmendable) {
3393 // Every function can be nounwind.
3394 checkAndQueryIRAttr<Attribute::NoUnwind, AANoUnwind>(FPos, FnAttrs);
3396 // Every function might be "no-return".
3397 checkAndQueryIRAttr<Attribute::NoReturn, AANoReturn>(FPos, FnAttrs);
3399 // Every function might be "no-recurse".
3400 checkAndQueryIRAttr<Attribute::NoRecurse, AANoRecurse>(FPos, FnAttrs);
3402 // Every function can be "non-convergent".
3403 if (Attrs.hasFnAttr(Attribute::Convergent))
3404 getOrCreateAAFor<AANonConvergent>(FPos);
3406 // Every function might be "readnone/readonly/writeonly/...".
3407 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3409 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3410 getOrCreateAAFor<AAMemoryLocation>(FPos);
3412 // Every function can track active assumptions.
3413 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3415 // If we're not using a dynamic denormal mode for float, there's nothing
3416 // worthwhile to infer. This misses the edge case denormal-fp-math="dynamic"
3417 // with denormal-fp-math-f32=something, but that likely has no real-world use.
3418 DenormalMode Mode = F.getDenormalMode(APFloat::IEEEsingle());
3419 if (Mode.Input == DenormalMode::Dynamic ||
3420 Mode.Output == DenormalMode::Dynamic)
3421 getOrCreateAAFor<AADenormalFPMath>(FPos);
3423 // Return attributes are only appropriate if the return type is non-void.
3424 Type *ReturnType = F.getReturnType();
3425 if (!ReturnType->isVoidTy()) {
3426 IRPosition RetPos = IRPosition::returned(F);
3427 AttributeSet RetAttrs = Attrs.getRetAttrs();
3429 // Every returned value might be dead.
3430 getOrCreateAAFor<AAIsDead>(RetPos);
3432 // Every returned value might be simplified.
3433 bool UsedAssumedInformation = false;
3434 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3435 AA::Intraprocedural);
3437 // Every returned value might be marked noundef.
3438 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(RetPos, RetAttrs);
3440 if (ReturnType->isPointerTy()) {
3442 // Every function with pointer return type might be marked align.
3443 getOrCreateAAFor<AAAlign>(RetPos);
3445 // Every function with pointer return type might be marked nonnull.
3446 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(RetPos, RetAttrs);
3448 // Every function with pointer return type might be marked noalias.
3449 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(RetPos, RetAttrs);
3451 // Every function with pointer return type might be marked
3452 // dereferenceable.
3453 getOrCreateAAFor<AADereferenceable>(RetPos);
3454 } else if (AttributeFuncs::isNoFPClassCompatibleType(ReturnType)) {
3455 getOrCreateAAFor<AANoFPClass>(RetPos);
3460 for (Argument &Arg : F.args()) {
3461 IRPosition ArgPos = IRPosition::argument(Arg);
3462 auto ArgNo = Arg.getArgNo();
3463 AttributeSet ArgAttrs = Attrs.getParamAttrs(ArgNo);
3465 if (!IsIPOAmendable) {
3466 if (Arg.getType()->isPointerTy())
3467 // Every argument with pointer type might be marked nofree.
3468 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3469 continue;
3472 // Every argument might be simplified. We have to go through the
3473 // Attributor interface though as outside AAs can register custom
3474 // simplification callbacks.
3475 bool UsedAssumedInformation = false;
3476 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3477 AA::Intraprocedural);
3479 // Every argument might be dead.
3480 getOrCreateAAFor<AAIsDead>(ArgPos);
3482 // Every argument might be marked noundef.
3483 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(ArgPos, ArgAttrs);
3485 if (Arg.getType()->isPointerTy()) {
3486 // Every argument with pointer type might be marked nonnull.
3487 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(ArgPos, ArgAttrs);
3489 // Every argument with pointer type might be marked noalias.
3490 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(ArgPos, ArgAttrs);
3492 // Every argument with pointer type might be marked dereferenceable.
3493 getOrCreateAAFor<AADereferenceable>(ArgPos);
3495 // Every argument with pointer type might be marked align.
3496 getOrCreateAAFor<AAAlign>(ArgPos);
3498 // Every argument with pointer type might be marked nocapture.
3499 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(ArgPos, ArgAttrs);
3501 // Every argument with pointer type might be marked
3502 // "readnone/readonly/writeonly/..."
3503 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3505 // Every argument with pointer type might be marked nofree.
3506 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(ArgPos, ArgAttrs);
3508 // Every argument with pointer type might be privatizable (or
3509 // promotable).
3510 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3511 } else if (AttributeFuncs::isNoFPClassCompatibleType(Arg.getType())) {
3512 getOrCreateAAFor<AANoFPClass>(ArgPos);
3516 auto CallSitePred = [&](Instruction &I) -> bool {
3517 auto &CB = cast<CallBase>(I);
3518 IRPosition CBInstPos = IRPosition::inst(CB);
3519 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3521 // Call sites might be dead if they have no side effects and no live
3522 // users. The return value might be dead if there are no live users.
3523 getOrCreateAAFor<AAIsDead>(CBInstPos);
3525 Function *Callee = dyn_cast_if_present<Function>(CB.getCalledOperand());
3526 // TODO: Even if the callee is not known now we might be able to simplify
3527 // the call/callee.
3528 if (!Callee) {
3529 getOrCreateAAFor<AAIndirectCallInfo>(CBFnPos);
3530 return true;
3533 // Every call site can track active assumptions.
3534 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3536 // Skip declarations unless annotations on their call sites were
3537 // explicitly requested.
3538 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3539 !Callee->hasMetadata(LLVMContext::MD_callback))
3540 return true;
3542 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3543 IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3544 bool UsedAssumedInformation = false;
3545 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3546 AA::Intraprocedural);
3548 if (AttributeFuncs::isNoFPClassCompatibleType(Callee->getReturnType()))
3549 getOrCreateAAFor<AANoFPClass>(CBInstPos);
3552 const AttributeList &CBAttrs = CBFnPos.getAttrList();
3553 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3555 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3556 AttributeSet CBArgAttrs = CBAttrs.getParamAttrs(I);
3558 // Every call site argument might be dead.
3559 getOrCreateAAFor<AAIsDead>(CBArgPos);
3561 // Call site argument might be simplified. We have to go through the
3562 // Attributor interface though as outside AAs can register custom
3563 // simplification callbacks.
3564 bool UsedAssumedInformation = false;
3565 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3566 AA::Intraprocedural);
3568 // Every call site argument might be marked "noundef".
3569 checkAndQueryIRAttr<Attribute::NoUndef, AANoUndef>(CBArgPos, CBArgAttrs);
3571 Type *ArgTy = CB.getArgOperand(I)->getType();
3573 if (!ArgTy->isPointerTy()) {
3574 if (AttributeFuncs::isNoFPClassCompatibleType(ArgTy))
3575 getOrCreateAAFor<AANoFPClass>(CBArgPos);
3577 continue;
3580 // Call site argument attribute "non-null".
3581 checkAndQueryIRAttr<Attribute::NonNull, AANonNull>(CBArgPos, CBArgAttrs);
3583 // Call site argument attribute "nocapture".
3584 checkAndQueryIRAttr<Attribute::NoCapture, AANoCapture>(CBArgPos,
3585 CBArgAttrs);
3587 // Call site argument attribute "no-alias".
3588 checkAndQueryIRAttr<Attribute::NoAlias, AANoAlias>(CBArgPos, CBArgAttrs);
3590 // Call site argument attribute "dereferenceable".
3591 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3593 // Call site argument attribute "align".
3594 getOrCreateAAFor<AAAlign>(CBArgPos);
3596 // Call site argument attribute
3597 // "readnone/readonly/writeonly/..."
3598 if (!CBAttrs.hasParamAttr(I, Attribute::ReadNone))
3599 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3601 // Call site argument attribute "nofree".
3602 checkAndQueryIRAttr<Attribute::NoFree, AANoFree>(CBArgPos, CBArgAttrs);
3604 return true;
3607 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3608 [[maybe_unused]] bool Success;
3609 bool UsedAssumedInformation = false;
3610 Success = checkForAllInstructionsImpl(
3611 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3612 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3613 (unsigned)Instruction::Call},
3614 UsedAssumedInformation);
3615 assert(Success && "Expected the check call to be successful!");
3617 auto LoadStorePred = [&](Instruction &I) -> bool {
3618 if (auto *LI = dyn_cast<LoadInst>(&I)) {
3619 getOrCreateAAFor<AAAlign>(IRPosition::value(*LI->getPointerOperand()));
3620 if (SimplifyAllLoads)
3621 getAssumedSimplified(IRPosition::value(I), nullptr,
3622 UsedAssumedInformation, AA::Intraprocedural);
3623 getOrCreateAAFor<AAAddressSpace>(
3624 IRPosition::value(*LI->getPointerOperand()));
3625 } else {
3626 auto &SI = cast<StoreInst>(I);
3627 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3628 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3629 UsedAssumedInformation, AA::Intraprocedural);
3630 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3631 getOrCreateAAFor<AAAddressSpace>(
3632 IRPosition::value(*SI.getPointerOperand()));
3634 return true;
3636 Success = checkForAllInstructionsImpl(
3637 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3638 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3639 UsedAssumedInformation);
3640 assert(Success && "Expected the check call to be successful!");
3642 // Predicate to seed AAAllocationInfo for every alloca instruction.
3643 auto AAAllocationInfoPred = [&](Instruction &I) -> bool {
3644 getOrCreateAAFor<AAAllocationInfo>(IRPosition::value(I));
3645 return true;
3648 Success = checkForAllInstructionsImpl(
3649 nullptr, OpcodeInstMap, AAAllocationInfoPred, nullptr, nullptr,
3650 {(unsigned)Instruction::Alloca}, UsedAssumedInformation);
3651 assert(Success && "Expected the check call to be successful!");
3654 bool Attributor::isClosedWorldModule() const {
3655 if (CloseWorldAssumption.getNumOccurrences())
3656 return CloseWorldAssumption;
3657 return isModulePass() && Configuration.IsClosedWorldModule;
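// Configuration sketch (hedged; field names as used in this file): a client
// that knows the whole program is visible, e.g., a fully linked device
// image, can opt in via
//
//   AttributorConfig AC(CGUpdater);
//   AC.IsModulePass = true;
//   AC.IsClosedWorldModule = true;
//
// while the CloseWorldAssumption command-line option, when given, overrides
// the configuration either way.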
3660 /// Helpers to ease debugging through output streams and print calls.
3662 ///{
3663 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3664 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3667 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3668 switch (AP) {
3669 case IRPosition::IRP_INVALID:
3670 return OS << "inv";
3671 case IRPosition::IRP_FLOAT:
3672 return OS << "flt";
3673 case IRPosition::IRP_RETURNED:
3674 return OS << "fn_ret";
3675 case IRPosition::IRP_CALL_SITE_RETURNED:
3676 return OS << "cs_ret";
3677 case IRPosition::IRP_FUNCTION:
3678 return OS << "fn";
3679 case IRPosition::IRP_CALL_SITE:
3680 return OS << "cs";
3681 case IRPosition::IRP_ARGUMENT:
3682 return OS << "arg";
3683 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3684 return OS << "cs_arg";
3686 llvm_unreachable("Unknown attribute position!");
3689 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3690 const Value &AV = Pos.getAssociatedValue();
3691 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3692 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3694 if (Pos.hasCallBaseContext())
3695 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3696 return OS << "}";
3699 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3700 OS << "range-state(" << S.getBitWidth() << ")<";
3701 S.getKnown().print(OS);
3702 OS << " / ";
3703 S.getAssumed().print(OS);
3704 OS << ">";
3706 return OS << static_cast<const AbstractState &>(S);
3709 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3710 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3713 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3714 AA.print(OS);
3715 return OS;
3718 raw_ostream &llvm::operator<<(raw_ostream &OS,
3719 const PotentialConstantIntValuesState &S) {
3720 OS << "set-state(< {";
3721 if (!S.isValidState())
3722 OS << "full-set";
3723 else {
3724 for (const auto &It : S.getAssumedSet())
3725 OS << It << ", ";
3726 if (S.undefIsContained())
3727 OS << "undef ";
3729 OS << "} >)";
3731 return OS;
3734 raw_ostream &llvm::operator<<(raw_ostream &OS,
3735 const PotentialLLVMValuesState &S) {
3736 OS << "set-state(< {";
3737 if (!S.isValidState())
3738 OS << "full-set";
3739 else {
3740 for (const auto &It : S.getAssumedSet()) {
3741 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3742 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3743 else
3744 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3746 if (S.undefIsContained())
3747 OS << "undef ";
3749 OS << "} >)";
3751 return OS;
3754 void AbstractAttribute::print(Attributor *A, raw_ostream &OS) const {
3755 OS << "[";
3756 OS << getName();
3757 OS << "] for CtxI ";
3759 if (auto *I = getCtxI()) {
3760 OS << "'";
3761 I->print(OS);
3762 OS << "'";
3763 } else
3764 OS << "<<null inst>>";
3766 OS << " at position " << getIRPosition() << " with state " << getAsStr(A)
3767 << '\n';
3770 void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3771 print(OS);
3773 for (const auto &DepAA : Deps) {
3774 auto *AA = DepAA.getPointer();
3775 OS << " updates ";
3776 AA->print(OS);
3779 OS << '\n';
3782 raw_ostream &llvm::operator<<(raw_ostream &OS,
3783 const AAPointerInfo::Access &Acc) {
3784 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3785 if (Acc.getLocalInst() != Acc.getRemoteInst())
3786 OS << " via " << *Acc.getLocalInst();
3787 if (Acc.getContent()) {
3788 if (*Acc.getContent())
3789 OS << " [" << **Acc.getContent() << "]";
3790 else
3791 OS << " [ <unknown> ]";
3793 return OS;
3795 ///}
3797 /// ----------------------------------------------------------------------------
3798 /// Pass (Manager) Boilerplate
3799 /// ----------------------------------------------------------------------------
3801 static bool runAttributorOnFunctions(InformationCache &InfoCache,
3802 SetVector<Function *> &Functions,
3803 AnalysisGetter &AG,
3804 CallGraphUpdater &CGUpdater,
3805 bool DeleteFns, bool IsModulePass) {
3806 if (Functions.empty())
3807 return false;
3809 LLVM_DEBUG({
3810 dbgs() << "[Attributor] Run on module with " << Functions.size()
3811 << " functions:\n";
3812 for (Function *Fn : Functions)
3813 dbgs() << " - " << Fn->getName() << "\n";
3816 // Create an Attributor and initially empty information cache that is filled
3817 // while we identify default attribute opportunities.
3818 AttributorConfig AC(CGUpdater);
3819 AC.IsModulePass = IsModulePass;
3820 AC.DeleteFns = DeleteFns;
3822 /// Tracking callback for specialization of indirect calls.
3823 DenseMap<CallBase *, std::unique_ptr<SmallPtrSet<Function *, 8>>>
3824 IndirectCalleeTrackingMap;
3825 if (MaxSpecializationPerCB.getNumOccurrences()) {
3826 AC.IndirectCalleeSpecializationCallback =
3827 [&](Attributor &, const AbstractAttribute &AA, CallBase &CB,
3828 Function &Callee, unsigned) {
3829 if (MaxSpecializationPerCB == 0)
3830 return false;
3831 auto &Set = IndirectCalleeTrackingMap[&CB];
3832 if (!Set)
3833 Set = std::make_unique<SmallPtrSet<Function *, 8>>();
3834 if (Set->size() >= MaxSpecializationPerCB)
3835 return Set->contains(&Callee);
3836 Set->insert(&Callee);
3837 return true;
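// A worked example of the cap above (values are hypothetical): with
// MaxSpecializationPerCB == 2 and potential callees f, g, and h reaching
// one call site, f and g are admitted and recorded in the per-call-site
// set; h is then rejected because the set is full and does not already
// contain it. With the option set to 0 every specialization is rejected.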
3841 Attributor A(Functions, InfoCache, AC);
3843 // Create shallow wrappers for all functions that are not IPO amendable.
3844 if (AllowShallowWrappers)
3845 for (Function *F : Functions)
3846 if (!A.isFunctionIPOAmendable(*F))
3847 Attributor::createShallowWrapper(*F);
3849 // Internalize non-exact functions.
3850 // TODO: For now we eagerly internalize functions without calculating the
3851 // cost; we need a cost interface to determine whether internalizing
3852 // a function is "beneficial".
3853 if (AllowDeepWrapper) {
3854 unsigned FunSize = Functions.size();
3855 for (unsigned u = 0; u < FunSize; u++) {
3856 Function *F = Functions[u];
3857 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3858 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3859 Function *NewF = Attributor::internalizeFunction(*F);
3860 assert(NewF && "Could not internalize function.");
3861 Functions.insert(NewF);
3863 // Update call graph
3864 CGUpdater.replaceFunctionWith(*F, *NewF);
3865 for (const Use &U : NewF->uses())
3866 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3867 auto *CallerF = CB->getCaller();
3868 CGUpdater.reanalyzeFunction(*CallerF);
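// Sketch of the intended effect (hedged): for a non-exact, non-interposable
// definition, e.g., available_externally, internalizeFunction is expected
// to produce an internal and therefore exact copy; redirecting the call
// graph to that copy lets the Attributor derive attributes it could not
// justify on the original definition.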
3874 for (Function *F : Functions) {
3875 if (F->hasExactDefinition())
3876 NumFnWithExactDefinition++;
3877 else
3878 NumFnWithoutExactDefinition++;
3880 // We look at internal functions only on demand, but if any use is not a
3881 // direct call or is outside the current set of analyzed functions, we
3882 // have to do it eagerly.
3883 if (F->hasLocalLinkage()) {
3884 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3885 const auto *CB = dyn_cast<CallBase>(U.getUser());
3886 return CB && CB->isCallee(&U) &&
3887 Functions.count(const_cast<Function *>(CB->getCaller()));
3889 continue;
3892 // Populate the Attributor with abstract attribute opportunities in the
3893 // function and the information cache with IR information.
3894 A.identifyDefaultAbstractAttributes(*F);
3897 ChangeStatus Changed = A.run();
3899 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3900 << " functions, result: " << Changed << ".\n");
3901 return Changed == ChangeStatus::CHANGED;
3904 static bool runAttributorLightOnFunctions(InformationCache &InfoCache,
3905 SetVector<Function *> &Functions,
3906 AnalysisGetter &AG,
3907 CallGraphUpdater &CGUpdater,
3908 FunctionAnalysisManager &FAM,
3909 bool IsModulePass) {
3910 if (Functions.empty())
3911 return false;
3913 LLVM_DEBUG({
3914 dbgs() << "[AttributorLight] Run on module with " << Functions.size()
3915 << " functions:\n";
3916 for (Function *Fn : Functions)
3917 dbgs() << " - " << Fn->getName() << "\n";
3920 // Create an Attributor and initially empty information cache that is filled
3921 // while we identify default attribute opportunities.
3922 AttributorConfig AC(CGUpdater);
3923 AC.IsModulePass = IsModulePass;
3924 AC.DeleteFns = false;
3925 DenseSet<const char *> Allowed(
3926 {&AAWillReturn::ID, &AANoUnwind::ID, &AANoRecurse::ID, &AANoSync::ID,
3927 &AANoFree::ID, &AANoReturn::ID, &AAMemoryLocation::ID,
3928 &AAMemoryBehavior::ID, &AAUnderlyingObjects::ID, &AANoCapture::ID,
3929 &AAInterFnReachability::ID, &AAIntraFnReachability::ID, &AACallEdges::ID,
3930 &AANoFPClass::ID, &AAMustProgress::ID, &AANonNull::ID});
3931 AC.Allowed = &Allowed;
3932 AC.UseLiveness = false;
3934 Attributor A(Functions, InfoCache, AC);
3936 for (Function *F : Functions) {
3937 if (F->hasExactDefinition())
3938 NumFnWithExactDefinition++;
3939 else
3940 NumFnWithoutExactDefinition++;
3942 // We look at internal functions only on demand, but if any use is not a
3943 // direct call or is outside the current set of analyzed functions, we
3944 // have to do it eagerly.
3945 if (AC.UseLiveness && F->hasLocalLinkage()) {
3946 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3947 const auto *CB = dyn_cast<CallBase>(U.getUser());
3948 return CB && CB->isCallee(&U) &&
3949 Functions.count(const_cast<Function *>(CB->getCaller()));
3951 continue;
3954 // Populate the Attributor with abstract attribute opportunities in the
3955 // function and the information cache with IR information.
3956 A.identifyDefaultAbstractAttributes(*F);
3959 ChangeStatus Changed = A.run();
3961 if (Changed == ChangeStatus::CHANGED) {
3962 // Invalidate analyses for modified functions so that we don't have to
3963 // invalidate all analyses for all functions in this SCC.
3964 PreservedAnalyses FuncPA;
3965 // We haven't changed the CFG for modified functions.
3966 FuncPA.preserveSet<CFGAnalyses>();
3967 for (Function *Changed : A.getModifiedFunctions()) {
3968 FAM.invalidate(*Changed, FuncPA);
3969 // Also invalidate any direct callers of changed functions since analyses
3970 // may care about attributes of direct callees. For example, MemorySSA
3971 // cares about whether or not a call's callee modifies memory and queries
3972 // that through function attributes.
3973 for (auto *U : Changed->users()) {
3974 if (auto *Call = dyn_cast<CallBase>(U)) {
3975 if (Call->getCalledFunction() == Changed)
3976 FAM.invalidate(*Call->getFunction(), FuncPA);
3981 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3982 << " functions, result: " << Changed << ".\n");
3983 return Changed == ChangeStatus::CHANGED;
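// Design note (as implied by the code above): AttributorLight restricts
// itself to the fixed Allowed set, never deletes functions, and keeps the
// CFG intact, so it can invalidate analyses for exactly the modified
// functions plus their direct callers instead of everything in the SCC or
// module.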
3986 void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3988 void AADepGraph::dumpGraph() {
3989 static std::atomic<int> CallTimes;
3990 std::string Prefix;
3992 if (!DepGraphDotFileNamePrefix.empty())
3993 Prefix = DepGraphDotFileNamePrefix;
3994 else
3995 Prefix = "dep_graph";
3996 std::string Filename =
3997 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3999 outs() << "Dependency graph dump to " << Filename << ".\n";
4001 std::error_code EC;
4003 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
4004 if (!EC)
4005 llvm::WriteGraph(File, this);
4007 CallTimes++;
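// Example of the names produced above (assuming the default prefix): the
// first dump goes to dep_graph_0.dot, the next to dep_graph_1.dot, and so
// on; the files can be rendered with, e.g., `dot -Tpdf dep_graph_0.dot`.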
4010 void AADepGraph::print() {
4011 for (auto DepAA : SyntheticRoot.Deps)
4012 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
4015 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
4016 FunctionAnalysisManager &FAM =
4017 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4018 AnalysisGetter AG(FAM);
4020 SetVector<Function *> Functions;
4021 for (Function &F : M)
4022 Functions.insert(&F);
4024 CallGraphUpdater CGUpdater;
4025 BumpPtrAllocator Allocator;
4026 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4027 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4028 /* DeleteFns */ true, /* IsModulePass */ true)) {
4029 // FIXME: Think about passes we will preserve and add them here.
4030 return PreservedAnalyses::none();
4032 return PreservedAnalyses::all();
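// Invocation sketch (hedged; names assumed to match the new-pass-manager
// registration): the module variant runs as `opt -passes=attributor`, the
// SCC variant below as `opt -passes='cgscc(attributor-cgscc)'`, and the
// light variants analogously as attributor-light / attributor-light-cgscc.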
4035 PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
4036 CGSCCAnalysisManager &AM,
4037 LazyCallGraph &CG,
4038 CGSCCUpdateResult &UR) {
4039 FunctionAnalysisManager &FAM =
4040 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4041 AnalysisGetter AG(FAM);
4043 SetVector<Function *> Functions;
4044 for (LazyCallGraph::Node &N : C)
4045 Functions.insert(&N.getFunction());
4047 if (Functions.empty())
4048 return PreservedAnalyses::all();
4050 Module &M = *Functions.back()->getParent();
4051 CallGraphUpdater CGUpdater;
4052 CGUpdater.initialize(CG, C, AM, UR);
4053 BumpPtrAllocator Allocator;
4054 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4055 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
4056 /* DeleteFns */ false,
4057 /* IsModulePass */ false)) {
4058 // FIXME: Think about passes we will preserve and add them here.
4059 PreservedAnalyses PA;
4060 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4061 return PA;
4063 return PreservedAnalyses::all();
4066 PreservedAnalyses AttributorLightPass::run(Module &M,
4067 ModuleAnalysisManager &AM) {
4068 FunctionAnalysisManager &FAM =
4069 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
4070 AnalysisGetter AG(FAM, /* CachedOnly */ true);
4072 SetVector<Function *> Functions;
4073 for (Function &F : M)
4074 Functions.insert(&F);
4076 CallGraphUpdater CGUpdater;
4077 BumpPtrAllocator Allocator;
4078 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
4079 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4080 /* IsModulePass */ true)) {
4081 PreservedAnalyses PA;
4082 // We have not added or removed functions.
4083 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4084 // We already invalidated all relevant function analyses above.
4085 PA.preserveSet<AllAnalysesOn<Function>>();
4086 return PA;
4088 return PreservedAnalyses::all();
4091 PreservedAnalyses AttributorLightCGSCCPass::run(LazyCallGraph::SCC &C,
4092 CGSCCAnalysisManager &AM,
4093 LazyCallGraph &CG,
4094 CGSCCUpdateResult &UR) {
4095 FunctionAnalysisManager &FAM =
4096 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
4097 AnalysisGetter AG(FAM);
4099 SetVector<Function *> Functions;
4100 for (LazyCallGraph::Node &N : C)
4101 Functions.insert(&N.getFunction());
4103 if (Functions.empty())
4104 return PreservedAnalyses::all();
4106 Module &M = *Functions.back()->getParent();
4107 CallGraphUpdater CGUpdater;
4108 CGUpdater.initialize(CG, C, AM, UR);
4109 BumpPtrAllocator Allocator;
4110 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
4111 if (runAttributorLightOnFunctions(InfoCache, Functions, AG, CGUpdater, FAM,
4112 /* IsModulePass */ false)) {
4113 PreservedAnalyses PA;
4114 // We have not added or removed functions.
4115 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
4116 // We already invalidated all relevant function analyses above.
4117 PA.preserveSet<AllAnalysesOn<Function>>();
4118 return PA;
4120 return PreservedAnalyses::all();
4122 namespace llvm {
4124 template <> struct GraphTraits<AADepGraphNode *> {
4125 using NodeRef = AADepGraphNode *;
4126 using DepTy = PointerIntPair<AADepGraphNode *, 1>;
4127 using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
4129 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
4130 static NodeRef DepGetVal(const DepTy &DT) { return DT.getPointer(); }
4132 using ChildIteratorType =
4133 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4134 using ChildEdgeIteratorType = AADepGraphNode::DepSetTy::iterator;
4136 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
4138 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
4141 template <>
4142 struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
4143 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
4145 using nodes_iterator =
4146 mapped_iterator<AADepGraphNode::DepSetTy::iterator, decltype(&DepGetVal)>;
4148 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
4150 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
4153 template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
4154 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
4156 static std::string getNodeLabel(const AADepGraphNode *Node,
4157 const AADepGraph *DG) {
4158 std::string AAString;
4159 raw_string_ostream O(AAString);
4160 Node->print(O);
4161 return AAString;
4165 } // end namespace llvm