//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
16 #include "llvm/Transforms/IPO/Attributor.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Argument.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/InstIterator.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
39 #include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"

STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");

// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" site one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sites, STATS_DECL and STATS_TRACK can also be used separately.

#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL(NAME, TYPE, MSG) STATISTIC(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
79 #define STATS_DECLTRACK_ARG_ATTR(NAME) \
80 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
81 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \
82 STATS_DECLTRACK(NAME, CSArguments, \
83 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
84 #define STATS_DECLTRACK_FN_ATTR(NAME) \
85 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
86 #define STATS_DECLTRACK_CS_ATTR(NAME) \
87 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
88 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \
89 STATS_DECLTRACK(NAME, FunctionReturn, \
90 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
91 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \
92 STATS_DECLTRACK(NAME, CSReturn, \
93 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
94 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
95 STATS_DECLTRACK(NAME, Floating, \
96 ("Number of floating values known to be '" #NAME "'"))

// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));
static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> VerifyAttributor(
    "attributor-verify", cl::Hidden,
    cl::desc("Verify the Attributor deduction and "
             "manifestation of attributes -- may issue false-positive errors"),
    cl::init(false));

/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
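
// Consequently, CHANGED dominates operator| and UNCHANGED dominates
// operator&, e.g.,
//   (ChangeStatus::CHANGED | ChangeStatus::UNCHANGED) == ChangeStatus::CHANGED
//   (ChangeStatus::CHANGED & ChangeStatus::UNCHANGED) == ChangeStatus::UNCHANGED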

/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
template <typename AAType, typename StateTy>
bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion, keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA);

  // All values have been visited.
  return true;
}

/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}

/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already present
/// in \p Attrs at the position described by \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}

ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}

ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following, generic code manifests the attributes in DeducedAttrs
  // if they improve the current IR. Due to the different annotation positions
  // we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
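
// Sentinel positions with otherwise unused kind encodings; presumably reserved
// as the special empty and tombstone keys required to store IRPosition in
// DenseMap/DenseSet containers.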
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);

SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}

bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
  return false;
}

void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
}

void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (auto *Arg = dyn_cast<Argument>(AnchorVal)) {
      assert(Arg->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(Arg == &getAssociatedValue() && "Associated value mismatch!");
    } else {
      auto &CB = cast<CallBase>(*AnchorVal);
      (void)CB;
      assert(CB.arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(CB.getArgOperand(getArgNo()) == &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}

/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as-in update is
/// required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);

template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
                                                       const IntegerState &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
                                                       const BooleanState &R) {
  return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}

/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](CallSite CS) {
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << CSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}

/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};

/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const typename AAType::StateType &>(AA.getState()));
  }
};

/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoUnwind}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};

struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
using AANoUnwindCallSite = AANoUnwindFunction;

/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
/// - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || !F->hasExactDefinition()) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments; if one is marked as returned, we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};

ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0)
      return ChangeStatus::UNCHANGED;
    CB.replaceAllUsesWith(&C);
    return ChangeStatus::CHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    getIRPosition() = IRPosition::argument(*UniqueRVArg);
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U))
            Changed = ReplaceCallSiteUsersWith(*CB, *RVC) | Changed;
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVC);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}

const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}

Optional<Value *>
AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
  // If checkForAllReturnedValues provides a unique value, ignoring potential
  // undef values that can also be present, it is assumed to be the actual
  // return value and forwarded to the caller of this method. If there are
  // multiple, a nullptr is returned indicating there cannot be a unique
  // returned value.
  Optional<Value *> UniqueRV;

  auto Pred = [&](Value &RV) -> bool {
    // If we found a second returned value and neither the current nor the
    // saved one is an undef, there is no unique returned value. Undefs are
    // special since we can pretend they have any value.
    if (UniqueRV.hasValue() && UniqueRV != &RV &&
        !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
      UniqueRV = nullptr;
      return false;
    }

    // Do not overwrite a value with an undef.
    if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
      UniqueRV = &RV;

    return true;
  };

  if (!A.checkForAllReturnedValues(Pred, *this))
    UniqueRV = nullptr;

  return UniqueRV;
}
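
// For example, if every return instruction returns either %arg or undef, the
// predicate above keeps %arg as the assumed unique return value; two distinct
// non-undef candidates collapse UniqueRV to nullptr instead.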

bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}

ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();

  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    const auto &RetValAA =
        A.getAAFor<AAReturnedValues>(*this, IRPosition::callsite_function(*CB));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << static_cast<const AbstractAttribute &>(RetValAA)
                      << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // thus if all return values can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}

struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};

/// Returned values information for a call site.
using AAReturnedValuesCallSite = AAReturnedValuesFunction;

/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoSync}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic. In other words, if an atomic instruction does not have unordered
  /// or monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to check if an intrinsic is volatile (memcpy,
  /// memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};

bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both orderings are relaxed can the cmpxchg be treated as
    // relaxed. Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}
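
// Concretely: a monotonic (relaxed) load or store is not non-relaxed, an
// acquire load or seq_cst store is, and a fence is only considered when it is
// not restricted to a single thread.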

/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element-wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}

bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
         "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}

ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}

struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
using AANoSyncCallSite = AANoSyncFunction;

/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoFree}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};

struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
using AANoFreeCallSite = AANoFreeFunction;

/// ------------------------ NonNull Argument Attribute ------------------------
struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP) : AANonNull(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }
};

/// NonNull attribute for a floating value.
struct AANonNullFloating : AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP) : AANonNullImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANonNullImpl::initialize(A);

    if (isAtFixpoint())
      return;

    const IRPosition &IRP = getIRPosition();
    const Value &V = IRP.getAssociatedValue();
    const DataLayout &DL = A.getDataLayout();

    // TODO: This context sensitive query should be removed once we can do
    // context sensitive queries in the genericValueTraversal below.
    if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
                       /* TODO: DT */ nullptr))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
                            /* TODO: CtxI */ nullptr,
                            /* TODO: DT */ nullptr))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS =
            static_cast<const AANonNull::StateType &>(AA.getState());
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
                                                     T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};

/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};

struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};

/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};

/// ------------------------ No-Recurse Attributes ----------------------------

struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({getAttrKind()})) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};

struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

using AANoRecurseCallSite = AANoRecurseFunction;

/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code
static bool containsCycle(Function &F) {
  SmallPtrSet<BasicBlock *, 32> Visited;

  // Traverse BB by dfs and check whether successor is already visited.
  for (BasicBlock *BB : depth_first(&F)) {
    Visited.insert(BB);
    for (auto *SuccBB : successors(BB)) {
      if (Visited.count(SuccBB))
        return true;
    }
  }
  return false;
}
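
// Note that this is an over-approximation: because Visited accumulates blocks
// across the whole depth-first traversal, a cross edge between two finished
// subtrees is also reported as a cycle. That is imprecise but conservative
// for the willreturn deduction below.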

// Helper function that checks whether the function has a loop which might
// become an endless loop.
// FIXME: Any cycle is regarded as endless loop for now.
//        We have to allow some patterns.
static bool containsPossiblyEndlessLoop(Function *F) {
  return !F || !F->hasExactDefinition() || containsCycle(*F);
}

struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::WillReturn})) {
      indicateOptimisticFixpoint();
      return;
    }

    Function *F = getAssociatedFunction();
    if (containsPossiblyEndlessLoop(F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};

struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
using AAWillReturnCallSite = AAWillReturnFunction;

/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoAlias}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final : AANoAliasImpl {
  AANoAliasArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      /// For now, we can only deduce noalias if we have call sites.
      /// FIXME: add more support.
      ImmutableCallSite ICS(&RV);
      if (!ICS)
        return false;

      const auto &NoAliasAA =
          A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      /// FIXME: We can improve capture check in two ways:
      /// 1. Use the AANoCapture facilities.
      /// 2. Use the location of return insts for escape queries.
      if (PointerMayBeCaptured(&RV, /* ReturnCaptures */ false,
                               /* StoreCaptures */ true))
        return false;

      return true;
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};

/// NoAlias attribute deduction for a call site return value.
using AANoAliasCallSiteReturned = AANoAliasReturned;
/// ------------------- AAIsDead Function Attribute -----------------------

struct AAIsDeadImpl : public AAIsDead {
  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}

  void initialize(Attributor &A) override {
    const Function *F = getAssociatedFunction();

    // Check for the (exact) definition first so we never dereference a null
    // function pointer below.
    if (!F || !F->hasExactDefinition()) {
      indicatePessimisticFixpoint();
      return;
    }

    // For internal functions we delay exploration until updateImpl has
    // verified that at least one call site is live.
    if (F->hasInternalLinkage())
      return;

    exploreFromEntry(A, F);
  }
  void exploreFromEntry(Attributor &A, const Function *F) {
    ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
    AssumedLiveBlocks.insert(&(F->getEntryBlock()));
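
    // The index-based loop below is intentional: findNextNoReturn can append
    // new instructions to ToBeExploredPaths while we iterate, so the size is
    // re-read on every iteration.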
    for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[i]))
        NoReturnCalls.insert(NextNoReturnI);
  }
  /// Find the next assumed noreturn instruction in the block of \p I starting
  /// from, thus including, \p I.
  ///
  /// The caller is responsible for monitoring the ToBeExploredPaths set as new
  /// instructions discovered in other basic blocks will be placed in there.
  ///
  /// \returns The next assumed noreturn instruction in the block of \p I
  ///          starting from, thus including, \p I.
  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
           std::to_string(NoReturnCalls.size()) + "]";
  }
  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    if (AssumedLiveBlocks.empty()) {
      F.replaceAllUsesWith(UndefValue::get(F.getType()));
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows catching asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    for (const Instruction *NRC : NoReturnCalls) {
      Instruction *I = const_cast<Instruction *>(NRC);
      BasicBlock *BB = I->getParent();
      Instruction *SplitPos = I->getNextNode();
      // TODO: mark stuff before unreachable instructions as dead.
      if (isa_and_nonnull<UnreachableInst>(SplitPos))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we keep the invoke the split position is at the beginning of the
        // normal destination block (it invokes a noreturn function after all).
        BasicBlock *NormalDestBB = II->getNormalDest();
        SplitPos = &NormalDestBB->front();

        /// Invoke is replaced with a call and unreachable is placed after it
        /// if the callee is nounwind and noreturn. Otherwise, we keep the
        /// invoke and only place an unreachable in the normal successor.
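        // For illustration (a sketch, not taken from the original source),
        // the nounwind + noreturn case rewrites roughly as follows:
        //
        //   invoke void @noret() to label %normal unwind label %lpad
        //
        // becomes
        //
        //   call void @noret()
        //   unreachable
        //
        // while the old invoke is kept alive in a split-off dead block.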
        if (Invoke2CallAllowed) {
          if (II->getCalledFunction()) {
            const IRPosition &IPos = IRPosition::callsite_function(*II);
            const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
            if (AANoUnw.isAssumedNoUnwind()) {
              LLVM_DEBUG(dbgs()
                         << "[AAIsDead] Replace invoke with call inst\n");
              // We do not need an invoke (II) but instead want a call followed
              // by an unreachable. However, we do not remove II as other
              // abstract attributes might have it cached as part of their
              // results. Given that we modify the CFG anyway, we simply keep II
              // around but in a new dead block. To avoid II being live through
              // a different edge we have to ensure the block we place it in is
              // only reached from the current block of II and then not reached
              // at all when we insert the unreachable.
              SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
              CallInst *CI = createCallMatchingInvoke(II);
              CI->insertBefore(II);
              CI->takeName(II);
              II->replaceAllUsesWith(CI);
              SplitPos = CI->getNextNode();
            }
          }
        }
      }

      BB = SplitPos->getParent();
      SplitBlock(BB, SplitPos);
      changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
      HasChanged = ChangeStatus::CHANGED;
    }

    return HasChanged;
  }
  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAssociatedFunction() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAssociatedFunction() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a noreturn call, then it is live.
    return isAfterNoReturn(I);
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Check if the instruction is after a noreturn call, in other words,
  /// assumed dead.
  bool isAfterNoReturn(const Instruction *I) const;

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Collection of to be explored paths.
  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;

  /// Collection of calls with noreturn attribute, assumed or known.
  SmallSetVector<const Instruction *, 4> NoReturnCalls;
};
struct AAIsDeadFunction final : public AAIsDeadImpl {
  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(DeadInternalFunction, Function,
               "Number of internal functions classified as dead (no live callsite)");
    BUILD_STAT_NAME(DeadInternalFunction, Function) +=
        (getAssociatedFunction()->hasInternalLinkage() &&
         AssumedLiveBlocks.empty())
            ? 1
            : 0;
    STATS_DECL(DeadBlocks, Function,
               "Number of basic blocks classified as dead");
    BUILD_STAT_NAME(DeadBlocks, Function) +=
        getAssociatedFunction()->size() - AssumedLiveBlocks.size();
    STATS_DECL(PartiallyDeadBlocks, Function,
               "Number of basic blocks classified as partially dead");
    BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
  }
};
bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
  const Instruction *PrevI = I->getPrevNode();
  while (PrevI) {
    if (NoReturnCalls.count(PrevI))
      return true;
    PrevI = PrevI->getPrevNode();
  }
  return false;
}
const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
                                                  const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  const Function &F = *BB->getParent();

  // Flag to determine if we can change an invoke to a call assuming the callee
  // is nounwind. This is not possible if the personality of the function
  // allows catching asynchronous exceptions.
  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

  // TODO: We should have a function that determines if an "edge" is dead.
  //       Edges could be from an instruction to the next or from a terminator
  //       to the successor. For now, we need to special case the unwind block
  //       of InvokeInst below.

  while (I) {
    ImmutableCallSite ICS(I);

    if (ICS) {
      const IRPosition &IPos = IRPosition::callsite_function(ICS);
      // Regardless of the no-return property of an invoke instruction we only
      // learn that the regular successor is not reachable through this
      // instruction but the unwind block might still be.
      if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
        // Use nounwind to justify the unwind block is dead as well.
        const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
        if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
          AssumedLiveBlocks.insert(Invoke->getUnwindDest());
          ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
        }
      }

      const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
      if (NoReturnAA.isAssumedNoReturn())
        return I;
    }

    I = I->getNextNode();
  }

  // Get new paths (reachable blocks).
  for (const BasicBlock *SuccBB : successors(BB)) {
    AssumedLiveBlocks.insert(SuccBB);
    ToBeExploredPaths.insert(&SuccBB->front());
  }

  // No noreturn instruction found.
  return nullptr;
}
ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
  const Function *F = getAssociatedFunction();
  ChangeStatus Status = ChangeStatus::UNCHANGED;

  if (F->hasInternalLinkage() && AssumedLiveBlocks.empty()) {
    auto CallSiteCheck = [&](CallSite) { return false; };

    // All callsites of F are dead.
    if (A.checkForAllCallSites(CallSiteCheck, *this, true))
      return ChangeStatus::UNCHANGED;

    // There exists at least one live call site, so we explore the function.
    Status = ChangeStatus::CHANGED;

    exploreFromEntry(A, F);
  }

  // Temporary collection to iterate over existing noreturn instructions. This
  // will allow easier modification of the NoReturnCalls collection.
  SmallVector<const Instruction *, 8> NoReturnChanged;

  for (const Instruction *I : NoReturnCalls)
    NoReturnChanged.push_back(I);

  for (const Instruction *I : NoReturnChanged) {
    size_t Size = ToBeExploredPaths.size();

    const Instruction *NextNoReturnI = findNextNoReturn(A, I);
    if (NextNoReturnI != I) {
      Status = ChangeStatus::CHANGED;
      NoReturnCalls.remove(I);
      if (NextNoReturnI)
        NoReturnCalls.insert(NextNoReturnI);
    }

    // Explore new paths.
    while (Size != ToBeExploredPaths.size()) {
      Status = ChangeStatus::CHANGED;
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[Size++]))
        NoReturnCalls.insert(NextNoReturnI);
    }
  }

  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
                    << AssumedLiveBlocks.size() << " Total number of blocks: "
                    << getAssociatedFunction()->size() << "\n");

  // If we know everything is live there is no need to query for liveness.
  if (NoReturnCalls.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
    // Indicating a pessimistic fixpoint will cause the state to be "invalid"
    // which will cause the Attributor to not return the AAIsDead on request,
    // which will prevent us from querying isAssumedDead().
    indicatePessimisticFixpoint();
    assert(!isValidState() && "Expected an invalid state!");
  }

  return Status;
}

/// Liveness information for call sites.
//
// TODO: Once we have call site specific value information we can provide call
//       site specific liveness information and then it makes sense to
//       specialize attributes for call sites instead of redirecting requests
//       to the callee.
using AAIsDeadCallSite = AAIsDeadFunction;
/// -------------------- Dereferenceable Argument Attribute --------------------

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
      S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 =
      clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}
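
// For illustration (a sketch under assumptions, not part of the original
// source): the specialization clamps the two sub-states independently and
// reports CHANGED if either one moved. A hypothetical use could look like:
//
//   DerefState S, R;
//   // ... R was tightened by some visitor, S is the accumulated state ...
//   if (clampStateAndIndicateChange<DerefState>(S, R) ==
//       ChangeStatus::CHANGED) {
//     // At least one of DerefBytesState or GlobalState was clamped.
//   }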
struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP)
      : AADereferenceableImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // with overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // at the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};
/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                   DerefState> {
  AADereferenceableReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                     DerefState>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument.
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl,
                                      DerefState> {
  AADereferenceableArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AADereferenceable,
                                        AADereferenceableImpl, DerefState>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP)
      : AADereferenceableFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
using AADereferenceableCallSiteReturned = AADereferenceableReturned;
// ------------------------ Align Argument Attribute ------------------------

struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}

  // Max alignment value allowed in IR.
  static const unsigned MAX_ALIGN = 1U << 29;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    takeAssumedMinimum(MAX_ALIGN);

    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check
  //       the deduced value against it in the existing manifest method (and a
  //       new one for AAAlignImpl) to avoid making the alignment explicit if
  //       it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};
/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AnchorVal = getIRPosition().getAnchorValue();
    for (const Use &U : AnchorVal.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AnchorVal)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(getAssumedAlign());
            Changed = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AnchorVal)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(getAssumedAlign());
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            Changed = ChangeStatus::CHANGED;
          }
      }
    }

    return AAAlignImpl::manifest(A) | Changed;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        T.takeKnownMaximum(V.getPointerAlignment(DL));
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this,
                                                   T, VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, and none are assumed
    //       dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};
/// Align attribute for function return value.
struct AAAlignReturned final
    : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
  AAAlignReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};

/// Align attribute for function argument.
struct AAAlignArgument final
    : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
  AAAlignArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
};

struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAAlignImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};

/// Align attribute deduction for a call site return value.
using AAAlignCallSiteReturned = AAAlignReturned;
/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({getAttrKind()}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override {
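    // The predicate rejects every instruction it is shown. Restricted to the
    // Ret opcode, the check can thus only succeed if there is no live return
    // instruction, which is exactly the noreturn property.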
    auto CheckForNoReturn = [](Instruction &) { return false; };
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret}))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
using AANoReturnCallSite = AANoReturnFunction;
/// ----------------------------------------------------------------------------
///                               Attributor
/// ----------------------------------------------------------------------------

bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *LivenessAA) {
  const Instruction *CtxI = AA.getIRPosition().getCtxI();
  if (!CtxI)
    return false;

  if (!LivenessAA)
    LivenessAA =
        &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
                            /* TrackDependence */ false);

  // Don't check liveness for AAIsDead.
  if (&AA == LivenessAA)
    return false;

  if (!LivenessAA->isAssumedDead(CtxI))
    return false;

  // We actually used liveness information so we have to record a dependence.
  recordDependence(*LivenessAA, AA);

  return true;
}
bool Attributor::checkForAllCallSites(const function_ref<bool(CallSite)> &Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has internal
  // linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  if (RequireAllCallSites && !AssociatedFunction->hasInternalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << AssociatedFunction->getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  for (const Use &U : AssociatedFunction->uses()) {
    Instruction *I = dyn_cast<Instruction>(U.getUser());
    // TODO: Deal with abstract call sites here.
    if (!I)
      return false;

    Function *Caller = I->getFunction();

    const auto &LivenessAA = getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*Caller), /* TrackDependence */ false);

    // Skip dead calls.
    if (LivenessAA.isAssumedDead(I)) {
      // We actually used liveness information so we have to record a
      // dependence.
      recordDependence(LivenessAA, QueryingAA);
      continue;
    }

    CallSite CS(U.getUser());
    if (!CS || !CS.isCallee(&U) || !CS.getCaller()->hasExactDefinition()) {
      if (!RequireAllCallSites)
        continue;

      LLVM_DEBUG(dbgs() << "[Attributor] User " << *U.getUser()
                        << " is an invalid use of "
                        << AssociatedFunction->getName() << "\n");
      return false;
    }

    if (Pred(CS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *CS.getInstruction() << "\n");
    return false;
  }

  return true;
}
bool Attributor::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide return instructions we have to have an exact
  // definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
    return false;

  // If this is a call site query we use the call site specific return values
  // and liveness information.
  const IRPosition &QueryIRP = IRPosition::function_scope(IRP);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
}
bool Attributor::checkForAllReturnedValues(
    const function_ref<bool(Value &)> &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
    return false;

  const IRPosition &QueryIRP = IRPosition::function_scope(IRP);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
      [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
        return Pred(RV);
      });
}
bool Attributor::checkForAllInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide instructions we have to have an exact definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
    return false;

  const IRPosition &QueryIRP = IRPosition::function_scope(IRP);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  auto &OpcodeInstMap =
      InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
  for (unsigned Opcode : Opcodes) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      // Skip dead instructions.
      if (LivenessAA.isAssumedDead(I)) {
        AnyDead = true;
        continue;
      }

      if (!Pred(*I))
        return false;
    }
  }

  // If we actually used liveness information, record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}
bool Attributor::checkForAllReadWriteInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    AbstractAttribute &QueryingAA) {

  const Function *AssociatedFunction =
      QueryingAA.getIRPosition().getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  const auto &LivenessAA = getAAFor<AAIsDead>(
      QueryingAA, QueryingAA.getIRPosition(), /* TrackDependence */ false);
  bool AnyDead = false;

  for (Instruction *I :
       InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
    // Skip dead instructions.
    if (LivenessAA.isAssumedDead(I)) {
      AnyDead = true;
      continue;
    }

    if (!Pred(*I))
      return false;
  }

  // If we actually used liveness information, record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}
ChangeStatus Attributor::run() {
  // Initialize all abstract attributes, allow new ones to be created.
  for (unsigned u = 0; u < AllAbstractAttributes.size(); u++)
    AllAbstractAttributes[u]->initialize(*this);

  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << AllAbstractAttributes.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;

  SmallVector<AbstractAttribute *, 64> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist;
  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());

  do {
    // Remember the size to determine new attributes.
    size_t NumAAs = AllAbstractAttributes.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      auto &QueriedAAs = QueryMap[ChangedAA];
      Worklist.insert(QueriedAAs.begin(), QueriedAAs.end());
    }

    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");

    // Reset the changed set.
    ChangedAAs.clear();

    // Update all abstract attributes in the work list and record the ones
    // that changed.
    for (AbstractAttribute *AA : Worklist)
      if (!isAssumedDead(*AA, nullptr))
        if (AA->update(*this) == ChangeStatus::CHANGED)
          ChangedAAs.push_back(AA);

    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
                      AllAbstractAttributes.end());

    // Reset the work list and repopulate with the changed abstract
    // attributes. Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());

  } while (!Worklist.empty() && IterationCounter++ < MaxFixpointIterations);
  size_t NumFinalAAs = AllAbstractAttributes.size();

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxFixpointIterations
                    << " iterations\n");

  if (VerifyMaxFixpointIterations && IterationCounter != MaxFixpointIterations)
    llvm_unreachable("The fixpoint was not reached with exactly the number of "
                     "specified iterations!");
  bool FinishedAtFixpoint = Worklist.empty();

  // Reset abstract attributes not settled in a sound fixpoint by now. This
  // happens when we stopped the fixpoint iteration early. Note that only the
  // ones marked as "changed" *and* the ones transitively depending on them
  // need to be reverted to a pessimistic state. Others might not be in a
  // fixpoint state but we can use the optimistic results for them anyway.
  SmallPtrSet<AbstractAttribute *, 32> Visited;
  for (unsigned u = 0; u < ChangedAAs.size(); u++) {
    AbstractAttribute *ChangedAA = ChangedAAs[u];
    if (!Visited.insert(ChangedAA).second)
      continue;

    AbstractState &State = ChangedAA->getState();
    if (!State.isAtFixpoint()) {
      State.indicatePessimisticFixpoint();

      NumAttributesTimedOut++;
    }

    auto &QueriedAAs = QueryMap[ChangedAA];
    ChangedAAs.append(QueriedAAs.begin(), QueriedAAs.end());
  }

  LLVM_DEBUG({
    if (!Visited.empty())
      dbgs() << "\n[Attributor] Finalized " << Visited.size()
             << " abstract attributes.\n";
  });
  unsigned NumManifested = 0;
  unsigned NumAtFixpoint = 0;
  ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
  for (AbstractAttribute *AA : AllAbstractAttributes) {
    AbstractState &State = AA->getState();

    // If there is not already a fixpoint reached, we can now take the
    // optimistic state. This is correct because we enforced a pessimistic one
    // on abstract attributes that were transitively dependent on a changed one
    // already above.
    if (!State.isAtFixpoint())
      State.indicateOptimisticFixpoint();

    // If the state is invalid, we do not try to manifest it.
    if (!State.isValidState())
      continue;

    // Skip dead code.
    if (isAssumedDead(*AA, nullptr))
      continue;

    // Manifest the state and record if we changed the IR.
    ChangeStatus LocalChange = AA->manifest(*this);
    if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
      AA->trackStatistics();

    ManifestChange = ManifestChange | LocalChange;

    NumAtFixpoint++;
    NumManifested += (LocalChange == ChangeStatus::CHANGED);
  }

  (void)NumManifested;
  (void)NumAtFixpoint;
  LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
                    << " attributes while " << NumAtFixpoint
                    << " were in a valid fixpoint state\n");
  // If verification is requested, we finished this run at a fixpoint, and the
  // IR was changed, we re-run the whole fixpoint analysis, starting at
  // re-initialization of the arguments. This re-run should not result in an
  // IR change. Though, the (virtual) state of the attributes at the end of
  // the re-run might be more optimistic than the known state or the IR state
  // if the better state cannot be manifested.
  if (VerifyAttributor && FinishedAtFixpoint &&
      ManifestChange == ChangeStatus::CHANGED) {
    VerifyAttributor = false;
    ChangeStatus VerifyStatus = run();
    if (VerifyStatus != ChangeStatus::UNCHANGED)
      llvm_unreachable(
          "Attributor verification failed, re-run did result in an IR change "
          "even after a fixpoint was reached in the original run. (False "
          "positives possible!)");
    VerifyAttributor = true;
  }

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;

  (void)NumFinalAAs;
  assert(
      NumFinalAAs == AllAbstractAttributes.size() &&
      "Expected the final number of abstract attributes to remain unchanged!");
  // Delete stuff at the end to avoid invalid references and to get a nice
  // order.
  LLVM_DEBUG(dbgs() << "\n[Attributor] Delete " << ToBeDeletedFunctions.size()
                    << " functions and " << ToBeDeletedBlocks.size()
                    << " blocks and " << ToBeDeletedInsts.size()
                    << " instructions\n");
  for (Instruction *I : ToBeDeletedInsts) {
    if (I->hasNUsesOrMore(1))
      I->replaceAllUsesWith(UndefValue::get(I->getType()));
    I->eraseFromParent();
  }

  for (BasicBlock *BB : ToBeDeletedBlocks) {
    // TODO: Check if we need to replace users (PHIs, indirect branches?)
    BB->eraseFromParent();
  }

  for (Function *Fn : ToBeDeletedFunctions) {
    Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
    Fn->eraseFromParent();
  }

  return ManifestChange;
}
/// Helper function that checks if an abstract attribute of type \p AAType
/// should be created for IR position \p IRP and if so creates and registers it
/// with the Attributor \p A.
///
/// This method will look at the provided whitelist. If one is given and the
/// kind \p AAType::ID is not contained, no abstract attribute is created.
///
/// \returns The created abstract attribute, or nullptr if none was created.
template <typename AAType>
static const AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A,
                                        DenseSet<const char *> *Whitelist) {
  if (Whitelist && !Whitelist->count(&AAType::ID))
    return nullptr;

  return &A.registerAA<AAType>(*new AAType(IRP));
}
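
// For illustration (a hypothetical caller, mirroring the uses below): the
// whitelist filter makes it cheap to register attributes speculatively, e.g.
//
//   IRPosition FnPos = IRPosition::function(F);
//   // Registers AANoUnwindFunction only if AANoUnwind::ID is whitelisted
//   // (or no whitelist was provided at all).
//   checkAndRegisterAA<AANoUnwindFunction>(FnPos, A, Whitelist);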
void Attributor::identifyDefaultAbstractAttributes(
    Function &F, DenseSet<const char *> *Whitelist) {

  IRPosition FPos = IRPosition::function(F);

  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  checkAndRegisterAA<AAIsDeadFunction>(FPos, *this, /* Whitelist */ nullptr);

  // Every function might be "will-return".
  checkAndRegisterAA<AAWillReturnFunction>(FPos, *this, Whitelist);

  // Every function can be nounwind.
  checkAndRegisterAA<AANoUnwindFunction>(FPos, *this, Whitelist);

  // Every function might be marked "nosync".
  checkAndRegisterAA<AANoSyncFunction>(FPos, *this, Whitelist);

  // Every function might be "no-free".
  checkAndRegisterAA<AANoFreeFunction>(FPos, *this, Whitelist);

  // Every function might be "no-return".
  checkAndRegisterAA<AANoReturnFunction>(FPos, *this, Whitelist);

  // Return attributes are only appropriate if the return type is non void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    checkAndRegisterAA<AAReturnedValuesFunction>(FPos, *this, Whitelist);

    if (ReturnType->isPointerTy()) {
      IRPosition RetPos = IRPosition::returned(F);

      // Every function with pointer return type might be marked align.
      checkAndRegisterAA<AAAlignReturned>(RetPos, *this, Whitelist);

      // Every function with pointer return type might be marked nonnull.
      checkAndRegisterAA<AANonNullReturned>(RetPos, *this, Whitelist);

      // Every function with pointer return type might be marked noalias.
      checkAndRegisterAA<AANoAliasReturned>(RetPos, *this, Whitelist);

      // Every function with pointer return type might be marked
      // dereferenceable.
      checkAndRegisterAA<AADereferenceableReturned>(RetPos, *this, Whitelist);
    }
  }

  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy()) {
      IRPosition ArgPos = IRPosition::argument(Arg);
      // Every argument with pointer type might be marked nonnull.
      checkAndRegisterAA<AANonNullArgument>(ArgPos, *this, Whitelist);

      // Every argument with pointer type might be marked dereferenceable.
      checkAndRegisterAA<AADereferenceableArgument>(ArgPos, *this, Whitelist);

      // Every argument with pointer type might be marked align.
      checkAndRegisterAA<AAAlignArgument>(ArgPos, *this, Whitelist);
    }
  }

  // Walk all instructions to find more attribute opportunities and also
  // interesting instructions that might be queried by abstract attributes
  // during their initialization or update.
  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones that are as
    // identified in the following switch.
    // Note: There are no concrete attributes now so this is initially empty.
    switch (I.getOpcode()) {
    default:
      assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
             "New call site/base instruction type needs to be known in the "
             "attributor.");
      break;
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
      checkAndRegisterAA<AAAlignFloating>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()), *this,
          Whitelist);
      break;
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
      checkAndRegisterAA<AAAlignFloating>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()), *this,
          Whitelist);
      break;
    case Instruction::Call:
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::Resume:
    case Instruction::Ret:
      IsInterestingOpcode = true;
    }
    if (IsInterestingOpcode)
      InstOpcodeMap[I.getOpcode()].push_back(&I);
    if (I.mayReadOrWriteMemory())
      ReadOrWriteInsts.push_back(&I);

    CallSite CS(&I);
    if (CS && CS.getCalledFunction()) {
      for (unsigned i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;
        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Call site argument attribute "non-null".
        checkAndRegisterAA<AANonNullCallSiteArgument>(CSArgPos, *this,
                                                      Whitelist);

        // Call site argument attribute "dereferenceable".
        checkAndRegisterAA<AADereferenceableCallSiteArgument>(CSArgPos, *this,
                                                              Whitelist);

        // Call site argument attribute "align".
        checkAndRegisterAA<AAAlignCallSiteArgument>(CSArgPos, *this, Whitelist);
      }
    }
  }
}
/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}
raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}
raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
            << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerState &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

void AbstractAttribute::print(raw_ostream &OS) const {
  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
     << "]";
}
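
// For illustration (hypothetical output, not captured from a run): a deduced
// noreturn attribute on a function @foo would print roughly along the lines of
//
//   [P: {fn:foo [...]}][noreturn][S: fix]
//
// combining the IR position, getAsStr(), and the abstract state marker.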
///}

/// ----------------------------------------------------------------------------
///                       Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------
static bool runAttributorOnModule(Module &M) {
  if (DisableAttributor)
    return false;

  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
                    << " functions.\n");

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  InformationCache InfoCache(M.getDataLayout());
  Attributor A(InfoCache);

  for (Function &F : M) {
    // TODO: Not all attributes require an exact definition. Find a way to
    //       enable deduction for some but not all attributes in case the
    //       definition might be changed at runtime, see also
    //       http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
    // TODO: We could always determine abstract attributes and if sufficient
    //       information was found we could duplicate the functions that do not
    //       have an exact definition.
    if (!F.hasExactDefinition()) {
      NumFnWithoutExactDefinition++;
      continue;
    }

    // For now we ignore naked and optnone functions.
    if (F.hasFnAttribute(Attribute::Naked) ||
        F.hasFnAttribute(Attribute::OptimizeNone))
      continue;

    NumFnWithExactDefinition++;

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(F);
  }

  return A.run() == ChangeStatus::CHANGED;
}

PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  if (runAttributorOnModule(M)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;
    return runAttributorOnModule(M);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace

Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }

char AttributorLegacyPass::ID = 0;
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AANoAlias::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    AA->initialize(A);                                                         \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    AA->initialize(A);                                                         \
    return *AA;                                                                \
  }
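
// For illustration (a sketch of the expansion, spacing simplified):
// CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind) produces
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... more invalid kinds ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new AANoUnwindFunction(IRP);
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new AANoUnwindCallSite(IRP);
//       break;
//     }
//     AA->initialize(A);
//     return *AA;
//   }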
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)

#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV

INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)