//===- Attributor.cpp - Module-wide attribute deduction -------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an inter-procedural pass that deduces and/or propagates
// attributes. This is done in an abstract interpretation style fixpoint
// iteration. See the Attributor.h file comment and the class descriptions in
// that file for more information.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/IPO/Attributor.h"

#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/EHPersonalities.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"

#include <cassert>

using namespace llvm;

#define DEBUG_TYPE "attributor"
STATISTIC(NumFnWithExactDefinition,
          "Number of functions with exact definitions");
STATISTIC(NumFnWithoutExactDefinition,
          "Number of functions without exact definitions");
STATISTIC(NumAttributesTimedOut,
          "Number of abstract attributes timed out before fixpoint");
STATISTIC(NumAttributesValidFixpoint,
          "Number of abstract attributes in a valid fixpoint state");
STATISTIC(NumAttributesManifested,
          "Number of abstract attributes manifested in IR");
// Some helper macros to deal with statistics tracking.
//
// Usage:
// For simple IR attribute tracking, overload trackStatistics in the abstract
// attribute and choose the right STATS_DECLTRACK_********* macro,
// e.g.,:
//  void trackStatistics() const override {
//    STATS_DECLTRACK_ARG_ATTR(returned)
//  }
// If there is a single "increment" side one can use the macro
// STATS_DECLTRACK with a custom message. If there are multiple increment
// sides, STATS_DECL and STATS_TRACK can also be used separately.
#define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME)                                     \
  ("Number of " #TYPE " marked '" #NAME "'")
#define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
#define STATS_DECL(NAME, TYPE, MSG) STATISTIC(BUILD_STAT_NAME(NAME, TYPE), MSG);
#define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
#define STATS_DECLTRACK(NAME, TYPE, MSG)                                       \
  {                                                                            \
    STATS_DECL(NAME, TYPE, MSG)                                                \
    STATS_TRACK(NAME, TYPE)                                                    \
  }
#define STATS_DECLTRACK_ARG_ATTR(NAME)                                         \
  STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
#define STATS_DECLTRACK_CSARG_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSArguments,                                           \
                  BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
#define STATS_DECLTRACK_FN_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
#define STATS_DECLTRACK_CS_ATTR(NAME)                                          \
  STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
#define STATS_DECLTRACK_FNRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, FunctionReturn,                                        \
                  BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
#define STATS_DECLTRACK_CSRET_ATTR(NAME)                                       \
  STATS_DECLTRACK(NAME, CSReturn,                                              \
                  BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
#define STATS_DECLTRACK_FLOATING_ATTR(NAME)                                    \
  STATS_DECLTRACK(NAME, Floating,                                              \
                  ("Number of floating values known to be '" #NAME "'"))
// TODO: Determine a good default value.
//
// In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
// (when run with the first 5 abstract attributes). The results also indicate
// that we never reach 32 iterations but always find a fixpoint sooner.
//
// This will become more involved once we perform two interleaved fixpoint
// iterations: bottom-up and top-down.
static cl::opt<unsigned>
    MaxFixpointIterations("attributor-max-iterations", cl::Hidden,
                          cl::desc("Maximal number of fixpoint iterations."),
                          cl::init(32));

static cl::opt<bool> VerifyMaxFixpointIterations(
    "attributor-max-iterations-verify", cl::Hidden,
    cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
    cl::init(false));

static cl::opt<bool> DisableAttributor(
    "attributor-disable", cl::Hidden,
    cl::desc("Disable the attributor inter-procedural deduction pass."),
    cl::init(true));

static cl::opt<bool> VerifyAttributor(
    "attributor-verify", cl::Hidden,
    cl::desc("Verify the Attributor deduction and "
             "manifestation of attributes -- may issue false-positive errors"),
    cl::init(false));

static cl::opt<unsigned> DepRecInterval(
    "attributor-dependence-recompute-interval", cl::Hidden,
    cl::desc("Number of iterations until dependences are recomputed."),
    cl::init(4));
/// Logic operators for the change status enum class.
///
///{
ChangeStatus llvm::operator|(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::CHANGED ? l : r;
}
ChangeStatus llvm::operator&(ChangeStatus l, ChangeStatus r) {
  return l == ChangeStatus::UNCHANGED ? l : r;
}
///}
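
// In other words, CHANGED is dominant for operator| and UNCHANGED is dominant
// for operator&, e.g.,
//   (ChangeStatus::UNCHANGED | ChangeStatus::CHANGED) == ChangeStatus::CHANGED
//   (ChangeStatus::UNCHANGED & ChangeStatus::CHANGED) == ChangeStatus::UNCHANGED
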
/// Recursively visit all values that might become \p IRP at some point. This
/// will be done by looking through cast instructions, selects, phis, and calls
/// with the "returned" attribute. Once we cannot look through the value any
/// further, the callback \p VisitValueCB is invoked and passed the current
/// value, the \p State, and a flag to indicate if we stripped anything. To
/// limit how much effort is invested, we will never visit more values than
/// specified by \p MaxValues.
template <typename AAType, typename StateTy>
bool genericValueTraversal(
    Attributor &A, IRPosition IRP, const AAType &QueryingAA, StateTy &State,
    const function_ref<bool(Value &, StateTy &, bool)> &VisitValueCB,
    int MaxValues = 8) {

  const AAIsDead *LivenessAA = nullptr;
  if (IRP.getAnchorScope())
    LivenessAA = &A.getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*IRP.getAnchorScope()),
        /* TrackDependence */ false);
  bool AnyDead = false;

  // TODO: Use Positions here to allow context sensitivity in VisitValueCB
  SmallPtrSet<Value *, 16> Visited;
  SmallVector<Value *, 16> Worklist;
  Worklist.push_back(&IRP.getAssociatedValue());

  int Iteration = 0;
  do {
    Value *V = Worklist.pop_back_val();

    // Check if we should process the current value. To prevent endless
    // recursion keep a record of the values we followed!
    if (!Visited.insert(V).second)
      continue;

    // Make sure we limit the compile time for complex expressions.
    if (Iteration++ >= MaxValues)
      return false;

    // Explicitly look through calls with a "returned" attribute if we do
    // not have a pointer as stripPointerCasts only works on them.
    Value *NewV = nullptr;
    if (V->getType()->isPointerTy()) {
      NewV = V->stripPointerCasts();
    } else {
      CallSite CS(V);
      if (CS && CS.getCalledFunction()) {
        for (Argument &Arg : CS.getCalledFunction()->args())
          if (Arg.hasReturnedAttr()) {
            NewV = CS.getArgOperand(Arg.getArgNo());
            break;
          }
      }
    }
    if (NewV && NewV != V) {
      Worklist.push_back(NewV);
      continue;
    }

    // Look through select instructions, visit both potential values.
    if (auto *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // Look through phi nodes, visit all live operands.
    if (auto *PHI = dyn_cast<PHINode>(V)) {
      assert(LivenessAA &&
             "Expected liveness in the presence of instructions!");
      for (unsigned u = 0, e = PHI->getNumIncomingValues(); u < e; u++) {
        const BasicBlock *IncomingBB = PHI->getIncomingBlock(u);
        if (LivenessAA->isAssumedDead(IncomingBB->getTerminator())) {
          AnyDead = true;
          continue;
        }
        Worklist.push_back(PHI->getIncomingValue(u));
      }
      continue;
    }

    // Once a leaf is reached we inform the user through the callback.
    if (!VisitValueCB(*V, State, Iteration > 1))
      return false;
  } while (!Worklist.empty());

  // If we actually used liveness information, we have to record a dependence.
  if (AnyDead)
    A.recordDependence(*LivenessAA, QueryingAA);

  // All values have been visited.
  return true;
}
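
// Illustrative use of the traversal above (hypothetical callback, not part of
// this file): collecting every leaf value reachable from a position IRP:
//   SmallPtrSet<Value *, 8> Leaves;
//   auto CollectCB = [](Value &V, SmallPtrSet<Value *, 8> &S, bool Stripped) {
//     S.insert(&V);
//     return true; // Keep traversing.
//   };
//   genericValueTraversal<AAType, SmallPtrSet<Value *, 8>>(A, IRP, QueryingAA,
//                                                          Leaves, CollectCB);
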
/// Return true if \p New is equal or worse than \p Old.
static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
  if (!Old.isIntAttribute())
    return true;

  return Old.getValueAsInt() >= New.getValueAsInt();
}
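
// E.g., for integer attributes such as alignment: with Old = align(16) and
// New = align(8), isEqualOrWorse returns true since 16 >= 8, so manifesting
// New would not improve the IR.
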
/// Return true if the information provided by \p Attr was added to the
/// attribute list \p Attrs. This is only the case if it was not already
/// present in \p Attrs at the position described by \p PK and \p AttrIdx.
static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
                             AttributeList &Attrs, int AttrIdx) {

  if (Attr.isEnumAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isStringAttribute()) {
    StringRef Kind = Attr.getKindAsString();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }
  if (Attr.isIntAttribute()) {
    Attribute::AttrKind Kind = Attr.getKindAsEnum();
    if (Attrs.hasAttribute(AttrIdx, Kind))
      if (isEqualOrWorse(Attr, Attrs.getAttribute(AttrIdx, Kind)))
        return false;
    Attrs = Attrs.removeAttribute(Ctx, AttrIdx, Kind);
    Attrs = Attrs.addAttribute(Ctx, AttrIdx, Attr);
    return true;
  }

  llvm_unreachable("Expected enum or string attribute!");
}
ChangeStatus AbstractAttribute::update(Attributor &A) {
  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  if (getState().isAtFixpoint())
    return HasChanged;

  LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");

  HasChanged = updateImpl(A);

  LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
                    << "\n");

  return HasChanged;
}
ChangeStatus
IRAttributeManifest::manifestAttrs(Attributor &A, IRPosition &IRP,
                                   const ArrayRef<Attribute> &DeducedAttrs) {
  Function *ScopeFn = IRP.getAssociatedFunction();
  IRPosition::Kind PK = IRP.getPositionKind();

  // In the following some generic code that will manifest attributes in
  // DeducedAttrs if they improve the current IR. Due to the different
  // annotation positions we use the underlying AttributeList interface.

  AttributeList Attrs;
  switch (PK) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    return ChangeStatus::UNCHANGED;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    Attrs = ScopeFn->getAttributes();
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    Attrs = ImmutableCallSite(&IRP.getAnchorValue()).getAttributes();
    break;
  }

  ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
  LLVMContext &Ctx = IRP.getAnchorValue().getContext();
  for (const Attribute &Attr : DeducedAttrs) {
    if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx()))
      continue;

    HasChanged = ChangeStatus::CHANGED;
  }

  if (HasChanged == ChangeStatus::UNCHANGED)
    return HasChanged;

  switch (PK) {
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_FUNCTION:
  case IRPosition::IRP_RETURNED:
    ScopeFn->setAttributes(Attrs);
    break;
  case IRPosition::IRP_CALL_SITE:
  case IRPosition::IRP_CALL_SITE_RETURNED:
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    CallSite(&IRP.getAnchorValue()).setAttributes(Attrs);
    break;
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
    break;
  }

  return HasChanged;
}
const IRPosition IRPosition::EmptyKey(255);
const IRPosition IRPosition::TombstoneKey(256);
SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
  IRPositions.emplace_back(IRP);

  ImmutableCallSite ICS(&IRP.getAnchorValue());
  switch (IRP.getPositionKind()) {
  case IRPosition::IRP_INVALID:
  case IRPosition::IRP_FLOAT:
  case IRPosition::IRP_FUNCTION:
    return;
  case IRPosition::IRP_ARGUMENT:
  case IRPosition::IRP_RETURNED:
    IRPositions.emplace_back(
        IRPosition::function(*IRP.getAssociatedFunction()));
    return;
  case IRPosition::IRP_CALL_SITE:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles())
      if (const Function *Callee = ICS.getCalledFunction())
        IRPositions.emplace_back(IRPosition::function(*Callee));
    return;
  case IRPosition::IRP_CALL_SITE_RETURNED:
    assert(ICS && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      if (const Function *Callee = ICS.getCalledFunction()) {
        IRPositions.emplace_back(IRPosition::returned(*Callee));
        IRPositions.emplace_back(IRPosition::function(*Callee));
      }
    }
    IRPositions.emplace_back(
        IRPosition::callsite_function(cast<CallBase>(*ICS.getInstruction())));
    return;
  case IRPosition::IRP_CALL_SITE_ARGUMENT: {
    int ArgNo = IRP.getArgNo();
    assert(ICS && ArgNo >= 0 && "Expected call site!");
    // TODO: We need to look at the operand bundles similar to the redirection
    //       in CallBase.
    if (!ICS.hasOperandBundles()) {
      const Function *Callee = ICS.getCalledFunction();
      if (Callee && Callee->arg_size() > unsigned(ArgNo))
        IRPositions.emplace_back(IRPosition::argument(*Callee->getArg(ArgNo)));
      if (Callee)
        IRPositions.emplace_back(IRPosition::function(*Callee));
    }
    IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
    return;
  }
  }
}
bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs)
      if (EquivIRP.getAttr(AK).getKindAsEnum() == AK)
        return true;
  return false;
}
void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
                          SmallVectorImpl<Attribute> &Attrs) const {
  for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this))
    for (Attribute::AttrKind AK : AKs) {
      const Attribute &Attr = EquivIRP.getAttr(AK);
      if (Attr.getKindAsEnum() == AK)
        Attrs.push_back(Attr);
    }
}
void IRPosition::verify() {
  switch (KindOrArgNo) {
  default:
    assert(KindOrArgNo >= 0 && "Expected argument or call site argument!");
    assert((isa<CallBase>(AnchorVal) || isa<Argument>(AnchorVal)) &&
           "Expected call base or argument for positive attribute index!");
    if (auto *Arg = dyn_cast<Argument>(AnchorVal)) {
      assert(Arg->getArgNo() == unsigned(getArgNo()) &&
             "Argument number mismatch!");
      assert(Arg == &getAssociatedValue() && "Associated value mismatch!");
    } else {
      auto &CB = cast<CallBase>(*AnchorVal);
      (void)CB;
      assert(CB.arg_size() > unsigned(getArgNo()) &&
             "Call site argument number mismatch!");
      assert(CB.getArgOperand(getArgNo()) == &getAssociatedValue() &&
             "Associated value mismatch!");
    }
    break;
  case IRP_INVALID:
    assert(!AnchorVal && "Expected no value for an invalid position!");
    break;
  case IRP_FLOAT:
    assert((!isa<CallBase>(&getAssociatedValue()) &&
            !isa<Argument>(&getAssociatedValue())) &&
           "Expected specialized kind for call base and argument values!");
    break;
  case IRP_RETURNED:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE_RETURNED:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site returned' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_CALL_SITE:
    assert((isa<CallBase>(AnchorVal)) &&
           "Expected call base for 'call site function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  case IRP_FUNCTION:
    assert(isa<Function>(AnchorVal) &&
           "Expected function for a 'function' position!");
    assert(AnchorVal == &getAssociatedValue() && "Associated value mismatch!");
    break;
  }
}
/// Helper functions to clamp a state \p S of type \p StateType with the
/// information in \p R and indicate/return if \p S did change (as-in update is
/// required to be run again).
///
///{
template <typename StateType>
ChangeStatus clampStateAndIndicateChange(StateType &S, const StateType &R);

template <>
ChangeStatus clampStateAndIndicateChange<IntegerState>(IntegerState &S,
                                                       const IntegerState &R) {
  auto Assumed = S.getAssumed();
  S ^= R;
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

template <>
ChangeStatus clampStateAndIndicateChange<BooleanState>(BooleanState &S,
                                                       const BooleanState &R) {
  return clampStateAndIndicateChange<IntegerState>(S, R);
}
///}
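
// For illustration: the deduction helpers below compute a fresh state (often
// named S or T) and then fold it into the attribute's own state, e.g.,
//   return clampStateAndIndicateChange<StateType>(this->getState(), S);
// The returned ChangeStatus tells the Attributor whether another update round
// is required.
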
/// Clamp the information known for all returned values of a function
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampReturnedValueStates(Attributor &A, const AAType &QueryingAA,
                                     StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert((QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_RETURNED ||
          QueryingAA.getIRPosition().getPositionKind() ==
              IRPosition::IRP_CALL_SITE_RETURNED) &&
         "Can only clamp returned value states for a function returned or call "
         "site returned position!");

  // Use an optional state as there might not be any return values and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // Callback for each possibly returned value.
  auto CheckReturnValue = [&](Value &RV) -> bool {
    const IRPosition &RVPos = IRPosition::value(RV);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, RVPos);
    LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV << " AA: " << AA.getAsStr()
                      << " @ " << RVPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " RV State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllReturnedValues(CheckReturnValue, QueryingAA))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
/// Helper class for generic deduction: return value -> returned position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAReturnedFromReturnedValues : public Base {
  AAReturnedFromReturnedValues(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampReturnedValueStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all returned values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
/// Clamp the information known at all call sites for a given argument
/// (identified by \p QueryingAA) into \p S.
template <typename AAType, typename StateType = typename AAType::StateType>
static void clampCallSiteArgumentStates(Attributor &A, const AAType &QueryingAA,
                                        StateType &S) {
  LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
                    << static_cast<const AbstractAttribute &>(QueryingAA)
                    << " into " << S << "\n");

  assert(QueryingAA.getIRPosition().getPositionKind() ==
             IRPosition::IRP_ARGUMENT &&
         "Can only clamp call site argument states for an argument position!");

  // Use an optional state as there might not be any call sites and we want
  // to join (IntegerState::operator&) the state of all there are.
  Optional<StateType> T;

  // The argument number which is also the call site argument number.
  unsigned ArgNo = QueryingAA.getIRPosition().getArgNo();

  auto CallSiteCheck = [&](CallSite CS) {
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    const AAType &AA = A.getAAFor<AAType>(QueryingAA, CSArgPos);
    LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS.getInstruction()
                      << " AA: " << AA.getAsStr() << " @" << CSArgPos << "\n");
    const StateType &AAS = static_cast<const StateType &>(AA.getState());
    if (T.hasValue())
      *T &= AAS;
    else
      T = AAS;
    LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS << " CSA State: " << T
                      << "\n");
    return T->isValidState();
  };

  if (!A.checkForAllCallSites(CallSiteCheck, QueryingAA, true))
    S.indicatePessimisticFixpoint();
  else if (T.hasValue())
    S ^= *T;
}
/// Helper class for generic deduction: call site argument -> argument position.
template <typename AAType, typename Base,
          typename StateType = typename AAType::StateType>
struct AAArgumentFromCallSiteArguments : public Base {
  AAArgumentFromCallSiteArguments(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    StateType S;
    clampCallSiteArgumentStates<AAType, StateType>(A, *this, S);
    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange<StateType>(this->getState(), S);
  }
};
/// Helper class for generic replication: function returned -> cs returned.
template <typename AAType, typename Base>
struct AACallSiteReturnedFromReturned : public Base {
  AACallSiteReturnedFromReturned(const IRPosition &IRP) : Base(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    assert(this->getIRPosition().getPositionKind() ==
               IRPosition::IRP_CALL_SITE_RETURNED &&
           "Can only wrap function returned positions for call site returned "
           "positions!");
    auto &S = this->getState();

    const Function *AssociatedFunction =
        this->getIRPosition().getAssociatedFunction();
    if (!AssociatedFunction)
      return S.indicatePessimisticFixpoint();

    IRPosition FnPos = IRPosition::returned(*AssociatedFunction);
    const AAType &AA = A.getAAFor<AAType>(*this, FnPos);
    return clampStateAndIndicateChange(
        S, static_cast<const typename AAType::StateType &>(AA.getState()));
  }
};
/// -----------------------NoUnwind Function Attribute--------------------------

struct AANoUnwindImpl : AANoUnwind {
  AANoUnwindImpl(const IRPosition &IRP) : AANoUnwind(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoUnwind}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "nounwind" : "may-unwind";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto Opcodes = {
        (unsigned)Instruction::Invoke,      (unsigned)Instruction::CallBr,
        (unsigned)Instruction::Call,        (unsigned)Instruction::CleanupRet,
        (unsigned)Instruction::CatchSwitch, (unsigned)Instruction::Resume};

    auto CheckForNoUnwind = [&](Instruction &I) {
      if (!I.mayThrow())
        return true;

      if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
        const auto &NoUnwindAA =
            A.getAAFor<AANoUnwind>(*this, IRPosition::callsite_function(ICS));
        return NoUnwindAA.isAssumedNoUnwind();
      }
      return false;
    };

    if (!A.checkForAllInstructions(CheckForNoUnwind, *this, Opcodes))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }
};
struct AANoUnwindFunction final : public AANoUnwindImpl {
  AANoUnwindFunction(const IRPosition &IRP) : AANoUnwindImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nounwind) }
};

/// NoUnwind attribute deduction for a call site.
using AANoUnwindCallSite = AANoUnwindFunction;
/// --------------------- Function Return Values -------------------------------

/// "Attribute" that collects all potential returned values and the return
/// instructions that they arise from.
///
/// If there is a unique returned value R, the manifest method will:
///   - mark R with the "returned" attribute, if R is an argument.
class AAReturnedValuesImpl : public AAReturnedValues, public AbstractState {

  /// Mapping of values potentially returned by the associated function to the
  /// return instructions that might return them.
  MapVector<Value *, SmallSetVector<ReturnInst *, 4>> ReturnedValues;

  /// Mapping to remember the number of returned values for a call site such
  /// that we can avoid updates if nothing changed.
  DenseMap<const CallBase *, unsigned> NumReturnedValuesPerKnownAA;

  /// Set of unresolved calls returned by the associated function.
  SmallSetVector<CallBase *, 4> UnresolvedCalls;

  /// State flags
  ///
  ///{
  bool IsFixed = false;
  bool IsValidState = true;
  ///}

public:
  AAReturnedValuesImpl(const IRPosition &IRP) : AAReturnedValues(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // Reset the state.
    IsFixed = false;
    IsValidState = true;
    ReturnedValues.clear();

    Function *F = getAssociatedFunction();
    if (!F || !F->hasExactDefinition()) {
      indicatePessimisticFixpoint();
      return;
    }

    // The map from instruction opcodes to those instructions in the function.
    auto &OpcodeInstMap = A.getInfoCache().getOpcodeInstMapForFunction(*F);

    // Look through all arguments, if one is marked as returned we are done.
    for (Argument &Arg : F->args()) {
      if (Arg.hasReturnedAttr()) {
        auto &ReturnInstSet = ReturnedValues[&Arg];
        for (Instruction *RI : OpcodeInstMap[Instruction::Ret])
          ReturnInstSet.insert(cast<ReturnInst>(RI));

        indicateOptimisticFixpoint();
        return;
      }
    }
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override;

  /// See AbstractAttribute::getState(...).
  AbstractState &getState() override { return *this; }

  /// See AbstractAttribute::getState(...).
  const AbstractState &getState() const override { return *this; }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  ChangeStatus updateImpl(Attributor &A) override;

  llvm::iterator_range<iterator> returned_values() override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  llvm::iterator_range<const_iterator> returned_values() const override {
    return llvm::make_range(ReturnedValues.begin(), ReturnedValues.end());
  }

  const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const override {
    return UnresolvedCalls;
  }

  /// Return the number of potential return values, -1 if unknown.
  size_t getNumReturnValues() const override {
    return isValidState() ? ReturnedValues.size() : -1;
  }

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return
  /// the Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// See AbstractState::checkForAllReturnedValues(...).
  bool checkForAllReturnedValuesAndReturnInsts(
      const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
          &Pred) const override;

  /// Pretty print the attribute similar to the IR representation.
  const std::string getAsStr() const override;

  /// See AbstractState::isAtFixpoint().
  bool isAtFixpoint() const override { return IsFixed; }

  /// See AbstractState::isValidState().
  bool isValidState() const override { return IsValidState; }

  /// See AbstractState::indicateOptimisticFixpoint(...).
  ChangeStatus indicateOptimisticFixpoint() override {
    IsFixed = true;
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractState::indicatePessimisticFixpoint(...).
  ChangeStatus indicatePessimisticFixpoint() override {
    IsFixed = true;
    IsValidState = false;
    return ChangeStatus::CHANGED;
  }
};
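
// Note the convention visible above: indicateOptimisticFixpoint() returns
// UNCHANGED because the assumed information is kept as-is, while
// indicatePessimisticFixpoint() returns CHANGED because the assumed
// information is discarded in favor of the known (pessimistic) state.
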
ChangeStatus AAReturnedValuesImpl::manifest(Attributor &A) {
  ChangeStatus Changed = ChangeStatus::UNCHANGED;

  // Bookkeeping.
  assert(isValidState());
  STATS_DECLTRACK(KnownReturnValues, FunctionReturn,
                  "Number of functions with known return values");

  // Check if we have an assumed unique return value that we could manifest.
  Optional<Value *> UniqueRV = getAssumedUniqueReturnValue(A);

  if (!UniqueRV.hasValue() || !UniqueRV.getValue())
    return Changed;

  // Bookkeeping.
  STATS_DECLTRACK(UniqueReturnValue, FunctionReturn,
                  "Number of functions with unique return");

  // Callback to replace the uses of CB with the constant C.
  auto ReplaceCallSiteUsersWith = [](CallBase &CB, Constant &C) {
    if (CB.getNumUses() == 0)
      return ChangeStatus::UNCHANGED;
    CB.replaceAllUsesWith(&C);
    return ChangeStatus::CHANGED;
  };

  // If the assumed unique return value is an argument, annotate it.
  if (auto *UniqueRVArg = dyn_cast<Argument>(UniqueRV.getValue())) {
    getIRPosition() = IRPosition::argument(*UniqueRVArg);
    Changed = IRAttribute::manifest(A);
  } else if (auto *RVC = dyn_cast<Constant>(UniqueRV.getValue())) {
    // We can replace the returned value with the unique returned constant.
    Value &AnchorValue = getAnchorValue();
    if (Function *F = dyn_cast<Function>(&AnchorValue)) {
      for (const Use &U : F->uses())
        if (CallBase *CB = dyn_cast<CallBase>(U.getUser()))
          if (CB->isCallee(&U))
            Changed = ReplaceCallSiteUsersWith(*CB, *RVC) | Changed;
    } else {
      assert(isa<CallBase>(AnchorValue) &&
             "Expected a function or call base anchor!");
      Changed = ReplaceCallSiteUsersWith(cast<CallBase>(AnchorValue), *RVC);
    }
    if (Changed == ChangeStatus::CHANGED)
      STATS_DECLTRACK(UniqueConstantReturnValue, FunctionReturn,
                      "Number of function returns replaced by constant return");
  }

  return Changed;
}
const std::string AAReturnedValuesImpl::getAsStr() const {
  return (isAtFixpoint() ? "returns(#" : "may-return(#") +
         (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
         ")[#UC: " + std::to_string(UnresolvedCalls.size()) + "]";
}
867 Optional<Value *>
868 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor &A) const {
869 // If checkForAllReturnedValues provides a unique value, ignoring potential
870 // undef values that can also be present, it is assumed to be the actual
871 // return value and forwarded to the caller of this method. If there are
872 // multiple, a nullptr is returned indicating there cannot be a unique
873 // returned value.
874 Optional<Value *> UniqueRV;
876 auto Pred = [&](Value &RV) -> bool {
877 // If we found a second returned value and neither the current nor the saved
878 // one is an undef, there is no unique returned value. Undefs are special
879 // since we can pretend they have any value.
880 if (UniqueRV.hasValue() && UniqueRV != &RV &&
881 !(isa<UndefValue>(RV) || isa<UndefValue>(UniqueRV.getValue()))) {
882 UniqueRV = nullptr;
883 return false;
886 // Do not overwrite a value with an undef.
887 if (!UniqueRV.hasValue() || !isa<UndefValue>(RV))
888 UniqueRV = &RV;
890 return true;
893 if (!A.checkForAllReturnedValues(Pred, *this))
894 UniqueRV = nullptr;
896 return UniqueRV;
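
// For example, for a function that returns either `undef` or its argument %a,
// the assumed unique return value is %a (undef can take any value); if the
// function can return two different non-undef values, nullptr is returned
// instead.
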
bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred) const {
  if (!isValidState())
    return false;

  // Check all returned values but ignore call sites as long as we have not
  // encountered an overdefined one during an update.
  for (auto &It : ReturnedValues) {
    Value *RV = It.first;

    CallBase *CB = dyn_cast<CallBase>(RV);
    if (CB && !UnresolvedCalls.count(CB))
      continue;

    if (!Pred(*RV, It.second))
      return false;
  }

  return true;
}
ChangeStatus AAReturnedValuesImpl::updateImpl(Attributor &A) {
  size_t NumUnresolvedCalls = UnresolvedCalls.size();
  bool Changed = false;

  // State used in the value traversals starting in returned values.
  struct RVState {
    // The map in which we collect return values -> return instrs.
    decltype(ReturnedValues) &RetValsMap;
    // The flag to indicate a change.
    bool &Changed;
    // The return instrs we come from.
    SmallSetVector<ReturnInst *, 4> RetInsts;
  };

  // Callback for a leaf value returned by the associated function.
  auto VisitValueCB = [](Value &Val, RVState &RVS, bool) -> bool {
    auto Size = RVS.RetValsMap[&Val].size();
    RVS.RetValsMap[&Val].insert(RVS.RetInsts.begin(), RVS.RetInsts.end());
    bool Inserted = RVS.RetValsMap[&Val].size() != Size;
    RVS.Changed |= Inserted;
    LLVM_DEBUG({
      if (Inserted)
        dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
               << " => " << RVS.RetInsts.size() << "\n";
    });
    return true;
  };

  // Helper method to invoke the generic value traversal.
  auto VisitReturnedValue = [&](Value &RV, RVState &RVS) {
    IRPosition RetValPos = IRPosition::value(RV);
    return genericValueTraversal<AAReturnedValues, RVState>(A, RetValPos, *this,
                                                            RVS, VisitValueCB);
  };

  // Callback for all "return instructions" live in the associated function.
  auto CheckReturnInst = [this, &VisitReturnedValue, &Changed](Instruction &I) {
    ReturnInst &Ret = cast<ReturnInst>(I);
    RVState RVS({ReturnedValues, Changed, {}});
    RVS.RetInsts.insert(&Ret);
    return VisitReturnedValue(*Ret.getReturnValue(), RVS);
  };

  // Start by discovering returned values from all live return instructions in
  // the associated function.
  if (!A.checkForAllInstructions(CheckReturnInst, *this, {Instruction::Ret}))
    return indicatePessimisticFixpoint();
  // Once returned values "directly" present in the code are handled we try to
  // resolve returned calls.
  decltype(ReturnedValues) NewRVsMap;
  for (auto &It : ReturnedValues) {
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It.first
                      << " by #" << It.second.size() << " RIs\n");
    CallBase *CB = dyn_cast<CallBase>(It.first);
    if (!CB || UnresolvedCalls.count(CB))
      continue;

    if (!CB->getCalledFunction()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // TODO: use the function scope once we have call site AAReturnedValues.
    const auto &RetValAA = A.getAAFor<AAReturnedValues>(
        *this, IRPosition::function(*CB->getCalledFunction()));
    LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
                      << static_cast<const AbstractAttribute &>(RetValAA)
                      << "\n");

    // Skip dead ends, thus if we do not know anything about the returned
    // call we mark it as unresolved and it will stay that way.
    if (!RetValAA.getState().isValidState()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
                        << "\n");
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Do not try to learn partial information. If the callee has unresolved
    // return values we will treat the call as unresolved/opaque.
    auto &RetValAAUnresolvedCalls = RetValAA.getUnresolvedCalls();
    if (!RetValAAUnresolvedCalls.empty()) {
      UnresolvedCalls.insert(CB);
      continue;
    }

    // Now check if we can track transitively returned values. If possible,
    // thus if all return values can be represented in the current scope, do so.
    bool Unresolved = false;
    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (isa<Argument>(RetVal) || isa<CallBase>(RetVal) ||
          isa<Constant>(RetVal))
        continue;
      // Anything that did not fit in the above categories cannot be resolved,
      // mark the call as unresolved.
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
                           "cannot be translated: "
                        << *RetVal << "\n");
      UnresolvedCalls.insert(CB);
      Unresolved = true;
      break;
    }

    if (Unresolved)
      continue;

    // Now track transitively returned values.
    unsigned &NumRetAA = NumReturnedValuesPerKnownAA[CB];
    if (NumRetAA == RetValAA.getNumReturnValues()) {
      LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
                           "changed since it was seen last\n");
      continue;
    }
    NumRetAA = RetValAA.getNumReturnValues();

    for (auto &RetValAAIt : RetValAA.returned_values()) {
      Value *RetVal = RetValAAIt.first;
      if (Argument *Arg = dyn_cast<Argument>(RetVal)) {
        // Arguments are mapped to call site operands and we begin the
        // traversal again.
        bool Unused = false;
        RVState RVS({NewRVsMap, Unused, RetValAAIt.second});
        VisitReturnedValue(*CB->getArgOperand(Arg->getArgNo()), RVS);
        continue;
      } else if (isa<CallBase>(RetVal)) {
        // Call sites are resolved by the callee attribute over time, no need
        // to do anything for us.
        continue;
      } else if (isa<Constant>(RetVal)) {
        // Constants are valid everywhere, we can simply take them.
        NewRVsMap[RetVal].insert(It.second.begin(), It.second.end());
        continue;
      }
    }
  }

  // To avoid modifications to the ReturnedValues map while we iterate over it
  // we kept record of potential new entries in a copy map, NewRVsMap.
  for (auto &It : NewRVsMap) {
    assert(!It.second.empty() && "Entry does not add anything.");
    auto &ReturnInsts = ReturnedValues[It.first];
    for (ReturnInst *RI : It.second)
      if (ReturnInsts.insert(RI)) {
        LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
                          << *It.first << " => " << *RI << "\n");
        Changed = true;
      }
  }

  Changed |= (NumUnresolvedCalls != UnresolvedCalls.size());
  return Changed ? ChangeStatus::CHANGED : ChangeStatus::UNCHANGED;
}
struct AAReturnedValuesFunction final : public AAReturnedValuesImpl {
  AAReturnedValuesFunction(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(returned) }
};
/// Returned values information for call sites.
struct AAReturnedValuesCallSite final : AAReturnedValuesImpl {
  AAReturnedValuesCallSite(const IRPosition &IRP) : AAReturnedValuesImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for returned values are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};
/// ------------------------ NoSync Function Attribute -------------------------

struct AANoSyncImpl : AANoSync {
  AANoSyncImpl(const IRPosition &IRP) : AANoSync(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoSync}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "nosync" : "may-sync";
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// Helper function used to determine whether an instruction is non-relaxed
  /// atomic, i.e., an atomic instruction that does not have unordered or
  /// monotonic ordering.
  static bool isNonRelaxedAtomic(Instruction *I);

  /// Helper function used to determine whether an instruction is volatile.
  static bool isVolatile(Instruction *I);

  /// Helper function used to determine whether an intrinsic is nosync
  /// (currently only the mem* intrinsics: memcpy, memmove, memset).
  static bool isNoSyncIntrinsic(Instruction *I);
};
bool AANoSyncImpl::isNonRelaxedAtomic(Instruction *I) {
  if (!I->isAtomic())
    return false;

  AtomicOrdering Ordering;
  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    Ordering = cast<AtomicRMWInst>(I)->getOrdering();
    break;
  case Instruction::Store:
    Ordering = cast<StoreInst>(I)->getOrdering();
    break;
  case Instruction::Load:
    Ordering = cast<LoadInst>(I)->getOrdering();
    break;
  case Instruction::Fence: {
    auto *FI = cast<FenceInst>(I);
    if (FI->getSyncScopeID() == SyncScope::SingleThread)
      return false;
    Ordering = FI->getOrdering();
    break;
  }
  case Instruction::AtomicCmpXchg: {
    AtomicOrdering Success = cast<AtomicCmpXchgInst>(I)->getSuccessOrdering();
    AtomicOrdering Failure = cast<AtomicCmpXchgInst>(I)->getFailureOrdering();
    // Only if both are relaxed can it be treated as relaxed.
    // Otherwise it is non-relaxed.
    if (Success != AtomicOrdering::Unordered &&
        Success != AtomicOrdering::Monotonic)
      return true;
    if (Failure != AtomicOrdering::Unordered &&
        Failure != AtomicOrdering::Monotonic)
      return true;
    return false;
  }
  default:
    llvm_unreachable(
        "New atomic operations need to be known in the attributor.");
  }

  // Relaxed.
  if (Ordering == AtomicOrdering::Unordered ||
      Ordering == AtomicOrdering::Monotonic)
    return false;
  return true;
}
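
// Illustrative examples (not exercised in this file): a `load atomic ...
// monotonic` is relaxed and yields false here, while a `load atomic ...
// seq_cst` yields true.
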
/// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
/// FIXME: We should improve the handling of intrinsics.
bool AANoSyncImpl::isNoSyncIntrinsic(Instruction *I) {
  if (auto *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    /// Element wise atomic memory intrinsics can only be unordered,
    /// therefore nosync.
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memcpy_element_unordered_atomic:
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      if (!cast<MemIntrinsic>(II)->isVolatile())
        return true;
      return false;
    default:
      return false;
    }
  }
  return false;
}
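
// Illustrative consequence of the above: a plain, non-volatile `llvm.memcpy`
// call is treated as nosync, whereas a volatile one is not.
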
bool AANoSyncImpl::isVolatile(Instruction *I) {
  assert(!ImmutableCallSite(I) && !isa<CallBase>(I) &&
         "Calls should not be checked here");

  switch (I->getOpcode()) {
  case Instruction::AtomicRMW:
    return cast<AtomicRMWInst>(I)->isVolatile();
  case Instruction::Store:
    return cast<StoreInst>(I)->isVolatile();
  case Instruction::Load:
    return cast<LoadInst>(I)->isVolatile();
  case Instruction::AtomicCmpXchg:
    return cast<AtomicCmpXchgInst>(I)->isVolatile();
  default:
    return false;
  }
}
ChangeStatus AANoSyncImpl::updateImpl(Attributor &A) {

  auto CheckRWInstForNoSync = [&](Instruction &I) {
    /// We are looking for volatile instructions or non-relaxed atomics.
    /// FIXME: We should improve the handling of intrinsics.

    if (isa<IntrinsicInst>(&I) && isNoSyncIntrinsic(&I))
      return true;

    if (ImmutableCallSite ICS = ImmutableCallSite(&I)) {
      if (ICS.hasFnAttr(Attribute::NoSync))
        return true;

      const auto &NoSyncAA =
          A.getAAFor<AANoSync>(*this, IRPosition::callsite_function(ICS));
      if (NoSyncAA.isAssumedNoSync())
        return true;
      return false;
    }

    if (!isVolatile(&I) && !isNonRelaxedAtomic(&I))
      return true;

    return false;
  };

  auto CheckForNoSync = [&](Instruction &I) {
    // At this point we handled all read/write effects and they are all
    // nosync, so they can be skipped.
    if (I.mayReadOrWriteMemory())
      return true;

    // non-convergent and readnone imply nosync.
    return !ImmutableCallSite(&I).isConvergent();
  };

  if (!A.checkForAllReadWriteInstructions(CheckRWInstForNoSync, *this) ||
      !A.checkForAllCallLikeInstructions(CheckForNoSync, *this))
    return indicatePessimisticFixpoint();

  return ChangeStatus::UNCHANGED;
}
struct AANoSyncFunction final : public AANoSyncImpl {
  AANoSyncFunction(const IRPosition &IRP) : AANoSyncImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nosync) }
};

/// NoSync attribute deduction for a call site.
using AANoSyncCallSite = AANoSyncFunction;
/// ------------------------ No-Free Attributes ----------------------------

struct AANoFreeImpl : public AANoFree {
  AANoFreeImpl(const IRPosition &IRP) : AANoFree(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoFree}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoFree = [&](Instruction &I) {
      ImmutableCallSite ICS(&I);
      if (ICS.hasFnAttr(Attribute::NoFree))
        return true;

      const auto &NoFreeAA =
          A.getAAFor<AANoFree>(*this, IRPosition::callsite_function(ICS));
      return NoFreeAA.isAssumedNoFree();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForNoFree, *this))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nofree" : "may-free";
  }
};
struct AANoFreeFunction final : public AANoFreeImpl {
  AANoFreeFunction(const IRPosition &IRP) : AANoFreeImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(nofree) }
};

/// NoFree attribute deduction for a call site.
using AANoFreeCallSite = AANoFreeFunction;
/// ------------------------ NonNull Argument Attribute ------------------------
struct AANonNullImpl : AANonNull {
  AANonNullImpl(const IRPosition &IRP) : AANonNull(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NonNull, Attribute::Dereferenceable}))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "nonnull" : "may-null";
  }
};
/// NonNull attribute for a floating value.
struct AANonNullFloating : AANonNullImpl {
  AANonNullFloating(const IRPosition &IRP) : AANonNullImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANonNullImpl::initialize(A);

    if (isAtFixpoint())
      return;

    const IRPosition &IRP = getIRPosition();
    const Value &V = IRP.getAssociatedValue();
    const DataLayout &DL = A.getDataLayout();

    // TODO: This context sensitive query should be removed once we can do
    //       context sensitive queries in the genericValueTraversal below.
    if (isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr, IRP.getCtxI(),
                       /* TODO: DT */ nullptr))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AANonNull::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AANonNull>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        if (!isKnownNonZero(&V, DL, 0, /* TODO: AC */ nullptr,
                            /* TODO: CtxI */ nullptr,
                            /* TODO: DT */ nullptr))
          T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AANonNull::StateType &NS =
            static_cast<const AANonNull::StateType &>(AA.getState());
        T ^= NS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AANonNull, StateType>(A, getIRPosition(), *this,
                                                     T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nonnull)
  }
};
/// NonNull attribute for function return value.
struct AANonNullReturned final
    : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl> {
  AANonNullReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(nonnull) }
};
/// NonNull attribute for function argument.
struct AANonNullArgument final
    : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl> {
  AANonNullArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nonnull) }
};
struct AANonNullCallSiteArgument final : AANonNullFloating {
  AANonNullCallSiteArgument(const IRPosition &IRP) : AANonNullFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nonnull) }
};
/// NonNull attribute for a call site return position.
struct AANonNullCallSiteReturned final
    : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl> {
  AANonNullCallSiteReturned(const IRPosition &IRP)
      : AACallSiteReturnedFromReturned<AANonNull, AANonNullImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(nonnull) }
};
/// ------------------------ No-Recurse Attributes ----------------------------

struct AANoRecurseImpl : public AANoRecurse {
  AANoRecurseImpl(const IRPosition &IRP) : AANoRecurse(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({getAttrKind()})) {
      indicateOptimisticFixpoint();
      return;
    }
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "norecurse" : "may-recurse";
  }
};
struct AANoRecurseFunction final : AANoRecurseImpl {
  AANoRecurseFunction(const IRPosition &IRP) : AANoRecurseImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(norecurse) }
};

using AANoRecurseCallSite = AANoRecurseFunction;
/// ------------------------ Will-Return Attributes ----------------------------

// Helper function that checks whether a function has any cycle.
// TODO: Replace with more efficient code
static bool containsCycle(Function &F) {
  SmallPtrSet<BasicBlock *, 32> Visited;

  // Traverse BB by dfs and check whether successor is already visited.
  for (BasicBlock *BB : depth_first(&F)) {
    Visited.insert(BB);
    for (auto *SuccBB : successors(BB)) {
      if (Visited.count(SuccBB))
        return true;
    }
  }
  return false;
}
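
// Note that Visited accumulates blocks across the entire depth-first walk, so
// a block reachable along two acyclic paths (e.g., the join of a diamond CFG)
// is also reported as a cycle. The check is therefore conservative: it can
// only produce false positives, never miss a real cycle.
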
// Helper function that checks whether the function has a loop which might
// become an endless loop.
// FIXME: Any cycle is regarded as endless loop for now.
//        We have to allow some patterns.
static bool containsPossiblyEndlessLoop(Function *F) {
  return !F || !F->hasExactDefinition() || containsCycle(*F);
}
struct AAWillReturnImpl : public AAWillReturn {
  AAWillReturnImpl(const IRPosition &IRP) : AAWillReturn(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::WillReturn})) {
      indicateOptimisticFixpoint();
      return;
    }

    Function *F = getAssociatedFunction();
    if (containsPossiblyEndlessLoop(F))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForWillReturn = [&](Instruction &I) {
      IRPosition IPos = IRPosition::callsite_function(ImmutableCallSite(&I));
      const auto &WillReturnAA = A.getAAFor<AAWillReturn>(*this, IPos);
      if (WillReturnAA.isKnownWillReturn())
        return true;
      if (!WillReturnAA.isAssumedWillReturn())
        return false;
      const auto &NoRecurseAA = A.getAAFor<AANoRecurse>(*this, IPos);
      return NoRecurseAA.isAssumedNoRecurse();
    };

    if (!A.checkForAllCallLikeInstructions(CheckForWillReturn, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::getAsStr()
  const std::string getAsStr() const override {
    return getAssumed() ? "willreturn" : "may-noreturn";
  }
};
struct AAWillReturnFunction final : AAWillReturnImpl {
  AAWillReturnFunction(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(willreturn) }
};

/// WillReturn attribute deduction for a call site.
using AAWillReturnCallSite = AAWillReturnFunction;
/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    if (hasAttr({Attribute::NoAlias}))
      indicateOptimisticFixpoint();
  }

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};
/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};
/// NoAlias attribute for an argument.
struct AANoAliasArgument final : AANoAliasImpl {
  AANoAliasArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};
struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};
1601 /// NoAlias attribute for function return value.
1602 struct AANoAliasReturned final : AANoAliasImpl {
1603 AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}
1605 /// See AbstractAttribute::updateImpl(...).
1606 virtual ChangeStatus updateImpl(Attributor &A) override {
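// A minimal illustration (hypothetical IR, not from a test): in
//   %p = call noalias i8* @malloc(i64 4)
//   ret i8* %p
// the returned value is a noalias call result that is not captured before
// the return, so the enclosing function's return value is noalias as well.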
1608 auto CheckReturnValue = [&](Value &RV) -> bool {
1609 if (Constant *C = dyn_cast<Constant>(&RV))
1610 if (C->isNullValue() || isa<UndefValue>(C))
1611 return true;
1613 /// For now, we can only deduce noalias if we have call sites.
1614 /// FIXME: add more support.
1615 ImmutableCallSite ICS(&RV);
1616 if (!ICS)
1617 return false;
1619 const auto &NoAliasAA =
1620 A.getAAFor<AANoAlias>(*this, IRPosition::callsite_returned(ICS));
1621 if (!NoAliasAA.isAssumedNoAlias())
1622 return false;
1624 /// FIXME: We can improve capture check in two ways:
1625 /// 1. Use the AANoCapture facilities.
1626 /// 2. Use the location of return insts for escape queries.
1627 if (PointerMayBeCaptured(&RV, /* ReturnCaptures */ false,
1628 /* StoreCaptures */ true))
1629 return false;
1631 return true;
1634 if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
1635 return indicatePessimisticFixpoint();
1637 return ChangeStatus::UNCHANGED;
1640 /// See AbstractAttribute::trackStatistics()
1641 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
1644 /// NoAlias attribute deduction for a call site return value.
1645 using AANoAliasCallSiteReturned = AANoAliasReturned;
1647 /// -------------------AAIsDead Function Attribute-----------------------
1649 struct AAIsDeadImpl : public AAIsDead {
1650 AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}
1652 void initialize(Attributor &A) override {
1653 const Function *F = getAssociatedFunction();
1655 if (F && F->hasInternalLinkage())
1656 return;
1658 if (!F || !F->hasExactDefinition()) {
1659 indicatePessimisticFixpoint();
1660 return;
1663 exploreFromEntry(A, F);
1666 void exploreFromEntry(Attributor &A, const Function *F) {
1667 ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
1668 AssumedLiveBlocks.insert(&(F->getEntryBlock()));
1670 for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
1671 if (const Instruction *NextNoReturnI =
1672 findNextNoReturn(A, ToBeExploredPaths[i]))
1673 NoReturnCalls.insert(NextNoReturnI);
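// Note: findNextNoReturn can append to ToBeExploredPaths while we iterate,
// which is why the loop above re-reads the container size every round.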
1676 /// Find the next assumed noreturn instruction in the block of \p I starting
1677 /// from, thus including, \p I.
1679 /// The caller is responsible for monitoring the ToBeExploredPaths set as new
1680 /// instructions discovered in other basic blocks will be placed in there.
1682 /// \returns The next assumed noreturn instruction in the block of \p I
1683 /// starting from, thus including, \p I.
1684 const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);
1686 /// See AbstractAttribute::getAsStr().
1687 const std::string getAsStr() const override {
1688 return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
1689 std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
1690 std::to_string(NoReturnCalls.size()) + "]";
1693 /// See AbstractAttribute::manifest(...).
1694 ChangeStatus manifest(Attributor &A) override {
1695 assert(getState().isValidState() &&
1696 "Attempted to manifest an invalid state!");
1698 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1699 Function &F = *getAssociatedFunction();
1701 if (AssumedLiveBlocks.empty()) {
1702 F.replaceAllUsesWith(UndefValue::get(F.getType()));
1703 return ChangeStatus::CHANGED;
1706 // Flag to determine if we can change an invoke to a call assuming the
1707 // callee is nounwind. This is not possible if the personality of the
1708 // function allows catching asynchronous exceptions.
1709 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
1711 for (const Instruction *NRC : NoReturnCalls) {
1712 Instruction *I = const_cast<Instruction *>(NRC);
1713 BasicBlock *BB = I->getParent();
1714 Instruction *SplitPos = I->getNextNode();
1715 // TODO: mark stuff before unreachable instructions as dead.
1716 if (isa_and_nonnull<UnreachableInst>(SplitPos))
1717 continue;
1719 if (auto *II = dyn_cast<InvokeInst>(I)) {
1720 // If we keep the invoke the split position is at the beginning of the
1721 // normal destination block (it invokes a noreturn function after all).
1722 BasicBlock *NormalDestBB = II->getNormalDest();
1723 SplitPos = &NormalDestBB->front();
1725 /// Invoke is replaced with a call and unreachable is placed after it if
1726 /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
1727 /// and only place an unreachable in the normal successor.
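// Sketch of the rewrite when the callee is noreturn and assumed nounwind
// (callee name purely illustrative):
//   invoke void @abort() to label %cont unwind label %lpad
// conceptually becomes
//   call void @abort()
//   unreachable
// while the original invoke is kept in a freshly split, now dead, block so
// that cached references to it in other abstract attributes stay valid.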
1728 if (Invoke2CallAllowed) {
1729 if (II->getCalledFunction()) {
1730 const IRPosition &IPos = IRPosition::callsite_function(*II);
1731 const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
1732 if (AANoUnw.isAssumedNoUnwind()) {
1733 LLVM_DEBUG(dbgs()
1734 << "[AAIsDead] Replace invoke with call inst\n");
1735 // We do not need an invoke (II) but instead want a call followed
1736 // by an unreachable. However, we do not remove II as other
1737 // abstract attributes might have it cached as part of their
1738 // results. Given that we modify the CFG anyway, we simply keep II
1739 // around but in a new dead block. To avoid II being live through
1740 // a different edge we have to ensure the block we place it in is
1741 // only reached from the current block of II and then not reached
1742 // at all when we insert the unreachable.
1743 SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
1744 CallInst *CI = createCallMatchingInvoke(II);
1745 CI->insertBefore(II);
1746 CI->takeName(II);
1747 II->replaceAllUsesWith(CI);
1748 SplitPos = CI->getNextNode();
1754 BB = SplitPos->getParent();
1755 SplitBlock(BB, SplitPos);
1756 changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
1757 HasChanged = ChangeStatus::CHANGED;
1760 return HasChanged;
1763 /// See AbstractAttribute::updateImpl(...).
1764 ChangeStatus updateImpl(Attributor &A) override;
1766 /// See AAIsDead::isAssumedDead(BasicBlock *).
1767 bool isAssumedDead(const BasicBlock *BB) const override {
1768 assert(BB->getParent() == getAssociatedFunction() &&
1769 "BB must be in the same anchor scope function.");
1771 if (!getAssumed())
1772 return false;
1773 return !AssumedLiveBlocks.count(BB);
1776 /// See AAIsDead::isKnownDead(BasicBlock *).
1777 bool isKnownDead(const BasicBlock *BB) const override {
1778 return getKnown() && isAssumedDead(BB);
1781 /// See AAIsDead::isAssumed(Instruction *I).
1782 bool isAssumedDead(const Instruction *I) const override {
1783 assert(I->getParent()->getParent() == getAssociatedFunction() &&
1784 "Instruction must be in the same anchor scope function.");
1786 if (!getAssumed())
1787 return false;
1789 // If it is not in AssumedLiveBlocks then it is for sure dead.
1790 // Otherwise, it can still be after a noreturn call in a live block.
1791 if (!AssumedLiveBlocks.count(I->getParent()))
1792 return true;
1794 // If it is not after a noreturn call, then it is live.
1795 return isAfterNoReturn(I);
1798 /// See AAIsDead::isKnownDead(Instruction *I).
1799 bool isKnownDead(const Instruction *I) const override {
1800 return getKnown() && isAssumedDead(I);
1803 /// Check if the instruction is after a noreturn call, i.e., assumed dead.
1804 bool isAfterNoReturn(const Instruction *I) const;
1806 /// Determine if \p F might catch asynchronous exceptions.
1807 static bool mayCatchAsynchronousExceptions(const Function &F) {
1808 return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
1811 /// Collection of paths that still need to be explored.
1812 SmallSetVector<const Instruction *, 8> ToBeExploredPaths;
1814 /// Collection of all assumed live BasicBlocks.
1815 DenseSet<const BasicBlock *> AssumedLiveBlocks;
1817 /// Collection of calls with noreturn attribute, assumed or known.
1818 SmallSetVector<const Instruction *, 4> NoReturnCalls;
1821 struct AAIsDeadFunction final : public AAIsDeadImpl {
1822 AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
1824 /// See AbstractAttribute::trackStatistics()
1825 void trackStatistics() const override {
1826 STATS_DECL(DeadInternalFunction, Function,
1827 "Number of internal functions classified as dead (no live callsite)");
1828 BUILD_STAT_NAME(DeadInternalFunction, Function) +=
1829 (getAssociatedFunction()->hasInternalLinkage() &&
1830 AssumedLiveBlocks.empty())
1831 ? 1
1832 : 0;
1833 STATS_DECL(DeadBlocks, Function,
1834 "Number of basic blocks classified as dead");
1835 BUILD_STAT_NAME(DeadBlocks, Function) +=
1836 getAssociatedFunction()->size() - AssumedLiveBlocks.size();
1837 STATS_DECL(PartiallyDeadBlocks, Function,
1838 "Number of basic blocks classified as partially dead");
1839 BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
1843 bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
1844 const Instruction *PrevI = I->getPrevNode();
1845 while (PrevI) {
1846 if (NoReturnCalls.count(PrevI))
1847 return true;
1848 PrevI = PrevI->getPrevNode();
1850 return false;
1853 const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
1854 const Instruction *I) {
1855 const BasicBlock *BB = I->getParent();
1856 const Function &F = *BB->getParent();
1858 // Flag to determine if we can change an invoke to a call assuming the callee
1859 // is nounwind. This is not possible if the personality of the function allows
1860 // catching asynchronous exceptions.
1861 bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);
1863 // TODO: We should have a function that determines if an "edge" is dead.
1864 // Edges could be from an instruction to the next or from a terminator
1865 // to the successor. For now, we need to special case the unwind block
1866 // of InvokeInst below.
1868 while (I) {
1869 ImmutableCallSite ICS(I);
1871 if (ICS) {
1872 const IRPosition &IPos = IRPosition::callsite_function(ICS);
1873 // Regardless of the no-return property of an invoke instruction we only
1874 // learn that the regular successor is not reachable through this
1875 // instruction but the unwind block might still be.
1876 if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
1877 // Use nounwind to justify the unwind block is dead as well.
1878 const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
1879 if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
1880 AssumedLiveBlocks.insert(Invoke->getUnwindDest());
1881 ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
1885 const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
1886 if (NoReturnAA.isAssumedNoReturn())
1887 return I;
1890 I = I->getNextNode();
1893 // Get new paths (reachable blocks).
1894 for (const BasicBlock *SuccBB : successors(BB)) {
1895 AssumedLiveBlocks.insert(SuccBB);
1896 ToBeExploredPaths.insert(&SuccBB->front());
1899 // No noreturn instruction found.
1900 return nullptr;
1903 ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
1904 const Function *F = getAssociatedFunction();
1905 ChangeStatus Status = ChangeStatus::UNCHANGED;
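// Internal functions start out with no live blocks (see initialize) and
// are only explored once at least one live call site is found.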
1907 if (F->hasInternalLinkage() && AssumedLiveBlocks.empty()) {
1908 auto CallSiteCheck = [&](CallSite) { return false; };
1910 // All callsites of F are dead.
1911 if (A.checkForAllCallSites(CallSiteCheck, *this, true))
1912 return ChangeStatus::UNCHANGED;
1914 // There exists at least one live call site, so we explore the function.
1915 Status = ChangeStatus::CHANGED;
1917 exploreFromEntry(A, F);
1920 // Temporary collection to iterate over existing noreturn instructions. This
1921 // will allow easier modification of the NoReturnCalls collection.
1922 SmallVector<const Instruction *, 8> NoReturnChanged;
1924 for (const Instruction *I : NoReturnCalls)
1925 NoReturnChanged.push_back(I);
1927 for (const Instruction *I : NoReturnChanged) {
1928 size_t Size = ToBeExploredPaths.size();
1930 const Instruction *NextNoReturnI = findNextNoReturn(A, I);
1931 if (NextNoReturnI != I) {
1932 Status = ChangeStatus::CHANGED;
1933 NoReturnCalls.remove(I);
1934 if (NextNoReturnI)
1935 NoReturnCalls.insert(NextNoReturnI);
1938 // Explore new paths.
1939 while (Size != ToBeExploredPaths.size()) {
1940 Status = ChangeStatus::CHANGED;
1941 if (const Instruction *NextNoReturnI =
1942 findNextNoReturn(A, ToBeExploredPaths[Size++]))
1943 NoReturnCalls.insert(NextNoReturnI);
1947 LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
1948 << AssumedLiveBlocks.size() << " Total number of blocks: "
1949 << getAssociatedFunction()->size() << "\n");
1951 // If we know everything is live there is no need to query for liveness.
1952 if (NoReturnCalls.empty() &&
1953 getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
1954 // Indicating a pessimistic fixpoint will cause the state to be "invalid"
1955 // which will cause the Attributor to not return the AAIsDead on request,
1956 // which will prevent us from querying isAssumedDead().
1957 indicatePessimisticFixpoint();
1958 assert(!isValidState() && "Expected an invalid state!");
1961 return Status;
1964 /// Liveness information for a call site.
1965 struct AAIsDeadCallSite final : AAIsDeadImpl {
1966 AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}
1968 /// See AbstractAttribute::initialize(...).
1969 void initialize(Attributor &A) override {
1970 // TODO: Once we have call site specific value information we can provide
1971 // call site specific liveness information and then it makes
1972 // sense to specialize attributes for call sites instead of
1973 // redirecting requests to the callee.
1974 llvm_unreachable("Abstract attributes for liveness are not "
1975 "supported for call sites yet!");
1978 /// See AbstractAttribute::updateImpl(...).
1979 ChangeStatus updateImpl(Attributor &A) override {
1980 return indicatePessimisticFixpoint();
1983 /// See AbstractAttribute::trackStatistics()
1984 void trackStatistics() const override {}
1987 /// -------------------- Dereferenceable Argument Attribute --------------------
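// A DerefState combines two integer states: DerefBytesState, the number of
// known/assumed dereferenceable bytes, and GlobalState, which tracks the
// "globally" variant. Clamping clamps both members and reports CHANGED if
// either one changed.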
1989 template <>
1990 ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
1991 const DerefState &R) {
1992 ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
1993 S.DerefBytesState, R.DerefBytesState);
1994 ChangeStatus CS1 =
1995 clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
1996 return CS0 | CS1;
1999 struct AADereferenceableImpl : AADereferenceable {
2000 AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
2001 using StateType = DerefState;
2003 void initialize(Attributor &A) override {
2004 SmallVector<Attribute, 4> Attrs;
2005 getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
2006 Attrs);
2007 for (const Attribute &Attr : Attrs)
2008 takeKnownDerefBytesMaximum(Attr.getValueAsInt());
2010 NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());
2013 /// See AbstractAttribute::getState()
2014 /// {
2015 StateType &getState() override { return *this; }
2016 const StateType &getState() const override { return *this; }
2017 /// }
2019 void getDeducedAttributes(LLVMContext &Ctx,
2020 SmallVectorImpl<Attribute> &Attrs) const override {
2021 // TODO: Add *_globally support
2022 if (isAssumedNonNull())
2023 Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
2024 Ctx, getAssumedDereferenceableBytes()));
2025 else
2026 Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
2027 Ctx, getAssumedDereferenceableBytes()));
2030 /// See AbstractAttribute::getAsStr().
2031 const std::string getAsStr() const override {
2032 if (!getAssumedDereferenceableBytes())
2033 return "unknown-dereferenceable";
2034 return std::string("dereferenceable") +
2035 (isAssumedNonNull() ? "" : "_or_null") +
2036 (isAssumedGlobal() ? "_globally" : "") + "<" +
2037 std::to_string(getKnownDereferenceableBytes()) + "-" +
2038 std::to_string(getAssumedDereferenceableBytes()) + ">";
2042 /// Dereferenceable attribute for a floating value.
2043 struct AADereferenceableFloating : AADereferenceableImpl {
2044 AADereferenceableFloating(const IRPosition &IRP)
2045 : AADereferenceableImpl(IRP) {}
2047 /// See AbstractAttribute::updateImpl(...).
2048 ChangeStatus updateImpl(Attributor &A) override {
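// Worked example (illustrative): for
//   %q = getelementptr inbounds i8, i8* %p, i64 4
// where %p is assumed dereferenceable(16), the traversal strips the GEP,
// accumulates an offset of 4, and deduces dereferenceable(12) for %q,
// i.e., 16 bytes minus the 4 byte offset.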
2049 const DataLayout &DL = A.getDataLayout();
2051 auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
2052 unsigned IdxWidth =
2053 DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
2054 APInt Offset(IdxWidth, 0);
2055 const Value *Base =
2056 V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);
2058 const auto &AA =
2059 A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
2060 int64_t DerefBytes = 0;
2061 if (!Stripped && this == &AA) {
2062 // Use IR information if we did not strip anything.
2063 // TODO: track globally.
2064 bool CanBeNull;
2065 DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
2066 T.GlobalState.indicatePessimisticFixpoint();
2067 } else {
2068 const DerefState &DS = static_cast<const DerefState &>(AA.getState());
2069 DerefBytes = DS.DerefBytesState.getAssumed();
2070 T.GlobalState &= DS.GlobalState;
2073 // For now we do not try to "increase" dereferenceability due to negative
2074 // indices as we first have to come up with code to deal with loops and
2075 // with overflows of the dereferenceable bytes.
2076 int64_t OffsetSExt = Offset.getSExtValue();
2077 if (OffsetSExt < 0)
2078 Offset = 0;
2080 T.takeAssumedDerefBytesMinimum(
2081 std::max(int64_t(0), DerefBytes - OffsetSExt));
2083 if (this == &AA) {
2084 if (!Stripped) {
2085 // If nothing was stripped IR information is all we got.
2086 T.takeKnownDerefBytesMaximum(
2087 std::max(int64_t(0), DerefBytes - OffsetSExt));
2088 T.indicatePessimisticFixpoint();
2089 } else if (OffsetSExt > 0) {
2090 // If something was stripped but there is circular reasoning we look
2091 // at the offset. If it is positive we basically decrease the
2092 // dereferenceable bytes in a circular loop, which would slowly
2093 // drive them down to the known value; we accelerate that by
2094 // indicating a pessimistic fixpoint right away.
2095 T.indicatePessimisticFixpoint();
2099 return T.isValidState();
2102 DerefState T;
2103 if (!genericValueTraversal<AADereferenceable, DerefState>(
2104 A, getIRPosition(), *this, T, VisitValueCB))
2105 return indicatePessimisticFixpoint();
2107 return clampStateAndIndicateChange(getState(), T);
2110 /// See AbstractAttribute::trackStatistics()
2111 void trackStatistics() const override {
2112 STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
2116 /// Dereferenceable attribute for a return value.
2117 struct AADereferenceableReturned final
2118 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2119 DerefState> {
2120 AADereferenceableReturned(const IRPosition &IRP)
2121 : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
2122 DerefState>(IRP) {}
2124 /// See AbstractAttribute::trackStatistics()
2125 void trackStatistics() const override {
2126 STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
2130 /// Dereferenceable attribute for an argument.
2131 struct AADereferenceableArgument final
2132 : AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl,
2133 DerefState> {
2134 AADereferenceableArgument(const IRPosition &IRP)
2135 : AAArgumentFromCallSiteArguments<AADereferenceable,
2136 AADereferenceableImpl, DerefState>(
2137 IRP) {}
2139 /// See AbstractAttribute::trackStatistics()
2140 void trackStatistics() const override {
2141 STATS_DECLTRACK_ARG_ATTR(dereferenceable)
2145 /// Dereferenceable attribute for a call site argument.
2146 struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
2147 AADereferenceableCallSiteArgument(const IRPosition &IRP)
2148 : AADereferenceableFloating(IRP) {}
2150 /// See AbstractAttribute::trackStatistics()
2151 void trackStatistics() const override {
2152 STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
2156 /// Dereferenceable attribute deduction for a call site return value.
2157 using AADereferenceableCallSiteReturned = AADereferenceableReturned;
2159 /// ------------------------ Align Argument Attribute ------------------------
2161 struct AAAlignImpl : AAAlign {
2162 AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}
2164 // Max alignment value allowed in IR
2165 static const unsigned MAX_ALIGN = 1U << 29;
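// 1 << 29 matches the largest alignment LLVM IR accepts for the align
// attribute; starting the assumed alignment there and only lowering it
// keeps every intermediate state one that could legally be manifested.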
2167 /// See AbstractAttribute::initialize(...).
2168 void initialize(Attributor &A) override {
2169 takeAssumedMinimum(MAX_ALIGN);
2171 SmallVector<Attribute, 4> Attrs;
2172 getAttrs({Attribute::Alignment}, Attrs);
2173 for (const Attribute &Attr : Attrs)
2174 takeKnownMaximum(Attr.getValueAsInt());
2177 // TODO: Provide a helper to determine the implied ABI alignment and check,
2178 // in the existing manifest method and in a new one for AAAlignImpl, that
2179 // value to avoid making the alignment explicit if it did not improve.
2181 /// See AbstractAttribute::getDeducedAttributes
2182 virtual void
2183 getDeducedAttributes(LLVMContext &Ctx,
2184 SmallVectorImpl<Attribute> &Attrs) const override {
2185 if (getAssumedAlign() > 1)
2186 Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
2189 /// See AbstractAttribute::getAsStr().
2190 const std::string getAsStr() const override {
2191 return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
2192 "-" + std::to_string(getAssumedAlign()) + ">")
2193 : "unknown-align";
2197 /// Align attribute for a floating value.
2198 struct AAAlignFloating : AAAlignImpl {
2199 AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}
2201 /// See AbstractAttribute::manifest(...).
2202 ChangeStatus manifest(Attributor &A) override {
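// Besides the default attribute manifestation (delegated to
// AAAlignImpl::manifest below), we improve the align annotation on loads
// and stores that use this value as their pointer operand whenever our
// assumed alignment is better than the recorded one.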
2203 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2205 // Check for users that allow alignment annotations.
2206 Value &AnchorVal = getIRPosition().getAnchorValue();
2207 for (const Use &U : AnchorVal.uses()) {
2208 if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
2209 if (SI->getPointerOperand() == &AnchorVal)
2210 if (SI->getAlignment() < getAssumedAlign()) {
2211 STATS_DECLTRACK(AAAlign, Store,
2212 "Number of times alignemnt added to a store");
2213 SI->setAlignment(getAssumedAlign());
2214 Changed = ChangeStatus::CHANGED;
2216 } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
2217 if (LI->getPointerOperand() == &AnchorVal)
2218 if (LI->getAlignment() < getAssumedAlign()) {
2219 LI->setAlignment(getAssumedAlign());
2220 STATS_DECLTRACK(AAAlign, Load,
2221 "Number of times alignemnt added to a load");
2222 Changed = ChangeStatus::CHANGED;
2227 return AAAlignImpl::manifest(A) | Changed;
2230 /// See AbstractAttribute::updateImpl(...).
2231 ChangeStatus updateImpl(Attributor &A) override {
2232 const DataLayout &DL = A.getDataLayout();
2234 auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
2235 bool Stripped) -> bool {
2236 const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
2237 if (!Stripped && this == &AA) {
2238 // Use only IR information if we did not strip anything.
2239 T.takeKnownMaximum(V.getPointerAlignment(DL));
2240 T.indicatePessimisticFixpoint();
2241 } else {
2242 // Use abstract attribute information.
2243 const AAAlign::StateType &DS =
2244 static_cast<const AAAlign::StateType &>(AA.getState());
2245 T ^= DS;
2247 return T.isValidState();
2250 StateType T;
2251 if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
2252 VisitValueCB))
2253 return indicatePessimisticFixpoint();
2255 // TODO: If we know we visited all incoming values, and hence none are
2256 // assumed dead, we can take the known information from the state T.
2257 return clampStateAndIndicateChange(getState(), T);
2260 /// See AbstractAttribute::trackStatistics()
2261 void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
2264 /// Align attribute for function return value.
2265 struct AAAlignReturned final
2266 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
2267 AAAlignReturned(const IRPosition &IRP)
2268 : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}
2270 /// See AbstractAttribute::trackStatistics()
2271 void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
2274 /// Align attribute for function argument.
2275 struct AAAlignArgument final
2276 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
2277 AAAlignArgument(const IRPosition &IRP)
2278 : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}
2280 /// See AbstractAttribute::trackStatistics()
2281 void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
2284 struct AAAlignCallSiteArgument final : AAAlignFloating {
2285 AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}
2287 /// See AbstractAttribute::manifest(...).
2288 ChangeStatus manifest(Attributor &A) override {
2289 return AAAlignImpl::manifest(A);
2292 /// See AbstractAttribute::trackStatistics()
2293 void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
2296 /// Align attribute deduction for a call site return value.
2297 using AAAlignCallSiteReturned = AAAlignReturned;
2299 /// ------------------ Function No-Return Attribute ----------------------------
2300 struct AANoReturnImpl : public AANoReturn {
2301 AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}
2303 /// See AbstractAttribute::getAsStr().
2304 const std::string getAsStr() const override {
2305 return getAssumed() ? "noreturn" : "may-return";
2308 /// See AbstractAttribute::initialize(...).
2309 void initialize(Attributor &A) override {
2310 if (hasAttr({getAttrKind()}))
2311 indicateOptimisticFixpoint();
2314 /// See AbstractAttribute::updateImpl(Attributor &A).
2315 virtual ChangeStatus updateImpl(Attributor &A) override {
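// A function is noreturn if it contains no live return instruction: the
// always-false predicate makes checkForAllInstructions succeed only when
// the set of (live) Ret instructions it visits is empty.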
2316 auto CheckForNoReturn = [](Instruction &) { return false; };
2317 if (!A.checkForAllInstructions(CheckForNoReturn, *this,
2318 {(unsigned)Instruction::Ret}))
2319 return indicatePessimisticFixpoint();
2320 return ChangeStatus::UNCHANGED;
2324 struct AANoReturnFunction final : AANoReturnImpl {
2325 AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}
2327 /// See AbstractAttribute::trackStatistics()
2328 void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
2331 /// NoReturn attribute deduction for a call site.
2332 using AANoReturnCallSite = AANoReturnFunction;
2334 /// ----------------------------------------------------------------------------
2335 /// Attributor
2336 /// ----------------------------------------------------------------------------
2338 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
2339 const AAIsDead *LivenessAA) {
2340 const Instruction *CtxI = AA.getIRPosition().getCtxI();
2341 if (!CtxI)
2342 return false;
2344 if (!LivenessAA)
2345 LivenessAA =
2346 &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
2347 /* TrackDependence */ false);
2349 // Don't check liveness for AAIsDead.
2350 if (&AA == LivenessAA)
2351 return false;
2353 if (!LivenessAA->isAssumedDead(CtxI))
2354 return false;
2356 // We actually used liveness information so we have to record a dependence.
2357 recordDependence(*LivenessAA, AA);
2359 return true;
2362 bool Attributor::checkForAllCallSites(const function_ref<bool(CallSite)> &Pred,
2363 const AbstractAttribute &QueryingAA,
2364 bool RequireAllCallSites) {
2365 // We can try to determine information from
2366 // the call sites. However, this is only possible if all call sites are
2367 // known, which here means the function must have internal linkage.
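// Example: an internal function whose address does not escape has all of
// its call sites inside this module, so a property that holds at every
// call site can be propagated into the function body.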
2368 const IRPosition &IRP = QueryingAA.getIRPosition();
2369 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2370 if (!AssociatedFunction)
2371 return false;
2373 if (RequireAllCallSites && !AssociatedFunction->hasInternalLinkage()) {
2374 LLVM_DEBUG(
2375 dbgs()
2376 << "[Attributor] Function " << AssociatedFunction->getName()
2377 << " has no internal linkage, hence not all call sites are known\n");
2378 return false;
2381 for (const Use &U : AssociatedFunction->uses()) {
2382 Instruction *I = dyn_cast<Instruction>(U.getUser());
2383 // TODO: Deal with abstract call sites here.
2384 if (!I)
2385 return false;
2387 Function *Caller = I->getFunction();
2389 const auto &LivenessAA = getAAFor<AAIsDead>(
2390 QueryingAA, IRPosition::function(*Caller), /* TrackDependence */ false);
2392 // Skip dead calls.
2393 if (LivenessAA.isAssumedDead(I)) {
2394 // We actually used liveness information so we have to record a
2395 // dependence.
2396 recordDependence(LivenessAA, QueryingAA);
2397 continue;
2400 CallSite CS(U.getUser());
2401 if (!CS || !CS.isCallee(&U) || !CS.getCaller()->hasExactDefinition()) {
2402 if (!RequireAllCallSites)
2403 continue;
2405 LLVM_DEBUG(dbgs() << "[Attributor] User " << *U.getUser()
2406 << " is an invalid use of "
2407 << AssociatedFunction->getName() << "\n");
2408 return false;
2411 if (Pred(CS))
2412 continue;
2414 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
2415 << *CS.getInstruction() << "\n");
2416 return false;
2419 return true;
2422 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
2423 const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
2424 &Pred,
2425 const AbstractAttribute &QueryingAA) {
2427 const IRPosition &IRP = QueryingAA.getIRPosition();
2428 // Since we need to provide return instructions we have to have an exact
2429 // definition.
2430 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2431 if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
2432 return false;
2434 // If this is a call site query we use the call site specific return values
2435 // and liveness information.
2436 // TODO: use the function scope once we have call site AAReturnedValues.
2437 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2438 const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
2439 if (!AARetVal.getState().isValidState())
2440 return false;
2442 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
2445 bool Attributor::checkForAllReturnedValues(
2446 const function_ref<bool(Value &)> &Pred,
2447 const AbstractAttribute &QueryingAA) {
2449 const IRPosition &IRP = QueryingAA.getIRPosition();
2450 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2451 if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
2452 return false;
2454 // TODO: use the function scope once we have call site AAReturnedValues.
2455 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2456 const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
2457 if (!AARetVal.getState().isValidState())
2458 return false;
2460 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
2461 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
2462 return Pred(RV);
2466 bool Attributor::checkForAllInstructions(
2467 const llvm::function_ref<bool(Instruction &)> &Pred,
2468 const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {
2470 const IRPosition &IRP = QueryingAA.getIRPosition();
2471 // Since we need to provide instructions we have to have an exact definition.
2472 const Function *AssociatedFunction = IRP.getAssociatedFunction();
2473 if (!AssociatedFunction || !AssociatedFunction->hasExactDefinition())
2474 return false;
2476 // TODO: use the function scope once we have call site AAReturnedValues.
2477 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2478 const auto &LivenessAA =
2479 getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
2480 bool AnyDead = false;
2482 auto &OpcodeInstMap =
2483 InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
2484 for (unsigned Opcode : Opcodes) {
2485 for (Instruction *I : OpcodeInstMap[Opcode]) {
2486 // Skip dead instructions.
2487 if (LivenessAA.isAssumedDead(I)) {
2488 AnyDead = true;
2489 continue;
2492 if (!Pred(*I))
2493 return false;
2497 // If we actually used liveness information we have to record a dependence.
2498 if (AnyDead)
2499 recordDependence(LivenessAA, QueryingAA);
2501 return true;
2504 bool Attributor::checkForAllReadWriteInstructions(
2505 const llvm::function_ref<bool(Instruction &)> &Pred,
2506 AbstractAttribute &QueryingAA) {
2508 const Function *AssociatedFunction =
2509 QueryingAA.getIRPosition().getAssociatedFunction();
2510 if (!AssociatedFunction)
2511 return false;
2513 // TODO: use the function scope once we have call site AAReturnedValues.
2514 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
2515 const auto &LivenessAA =
2516 getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
2517 bool AnyDead = false;
2519 for (Instruction *I :
2520 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
2521 // Skip dead instructions.
2522 if (LivenessAA.isAssumedDead(I)) {
2523 AnyDead = true;
2524 continue;
2527 if (!Pred(*I))
2528 return false;
2531 // If we actually used liveness information we have to record a dependence.
2532 if (AnyDead)
2533 recordDependence(LivenessAA, QueryingAA);
2535 return true;
2538 ChangeStatus Attributor::run() {
2539 // Initialize all abstract attributes; initialization may register new ones,
// hence the index-based loop below.
2540 for (unsigned u = 0; u < AllAbstractAttributes.size(); u++)
2541 AllAbstractAttributes[u]->initialize(*this);
2543 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
2544 << AllAbstractAttributes.size()
2545 << " abstract attributes.\n");
2547 // Now that all abstract attributes are collected and initialized we start
2548 // the abstract analysis.
2550 unsigned IterationCounter = 1;
2552 SmallVector<AbstractAttribute *, 64> ChangedAAs;
2553 SetVector<AbstractAttribute *> Worklist;
2554 Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());
2556 bool RecomputeDependences = false;
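// Fixpoint loop: update every attribute on the worklist and, for each one
// whose state changed, re-enqueue all attributes that queried it (tracked
// in the QueryMap) so only affected attributes are re-run.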
2558 do {
2559 // Remember the size to determine new attributes.
2560 size_t NumAAs = AllAbstractAttributes.size();
2561 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
2562 << ", Worklist size: " << Worklist.size() << "\n");
2564 // If dependences (=QueryMap) are recomputed we have to look at all abstract
2565 // attributes again, regardless of what changed in the last iteration.
2566 if (RecomputeDependences) {
2567 LLVM_DEBUG(
2568 dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
2569 QueryMap.clear();
2570 ChangedAAs.clear();
2571 Worklist.insert(AllAbstractAttributes.begin(),
2572 AllAbstractAttributes.end());
2575 // Add all abstract attributes that are potentially dependent on one that
2576 // changed to the work list.
2577 for (AbstractAttribute *ChangedAA : ChangedAAs) {
2578 auto &QuerriedAAs = QueryMap[ChangedAA];
2579 Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
2582 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2583 << ", Worklist+Dependent size: " << Worklist.size()
2584 << "\n");
2586 // Reset the changed set.
2587 ChangedAAs.clear();
2589 // Update all abstract attribute in the work list and record the ones that
2590 // changed.
2591 for (AbstractAttribute *AA : Worklist)
2592 if (!isAssumedDead(*AA, nullptr))
2593 if (AA->update(*this) == ChangeStatus::CHANGED)
2594 ChangedAAs.push_back(AA);
2596 // Check whether we should recompute the dependences in the next iteration.
2597 RecomputeDependences = (DepRecomputeInterval > 0 &&
2598 IterationCounter % DepRecomputeInterval == 0);
2600 // Add attributes to the changed set if they have been created in the last
2601 // iteration.
2602 ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
2603 AllAbstractAttributes.end());
2605 // Reset the work list and repopulate with the changed abstract attributes.
2606 // Note that dependent ones are added above.
2607 Worklist.clear();
2608 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2610 } while (!Worklist.empty() && IterationCounter++ < MaxFixpointIterations);
2612 size_t NumFinalAAs = AllAbstractAttributes.size();
2614 if (VerifyMaxFixpointIterations && IterationCounter != MaxFixpointIterations) {
2615 errs() << "\n[Attributor] Fixpoint iteration done after: "
2616 << IterationCounter << "/" << MaxFixpointIterations
2617 << " iterations\n";
2618 llvm_unreachable("The fixpoint was not reached with exactly the number of "
2619 "specified iterations!");
2622 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2623 << IterationCounter << "/" << MaxFixpointIterations
2624 << " iterations\n");
2627 bool FinishedAtFixpoint = Worklist.empty();
2629 // Reset abstract attributes not settled in a sound fixpoint by now. This
2630 // happens when we stopped the fixpoint iteration early. Note that only the
2631 // ones marked as "changed" *and* the ones transitively depending on them
2632 // need to be reverted to a pessimistic state. Others might not be in a
2633 // fixpoint state but we can use the optimistic results for them anyway.
2634 SmallPtrSet<AbstractAttribute *, 32> Visited;
2635 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2636 AbstractAttribute *ChangedAA = ChangedAAs[u];
2637 if (!Visited.insert(ChangedAA).second)
2638 continue;
2640 AbstractState &State = ChangedAA->getState();
2641 if (!State.isAtFixpoint()) {
2642 State.indicatePessimisticFixpoint();
2644 NumAttributesTimedOut++;
2647 auto &QuerriedAAs = QueryMap[ChangedAA];
2648 ChangedAAs.append(QuerriedAAs.begin(), QuerriedAAs.end());
2651 LLVM_DEBUG({
2652 if (!Visited.empty())
2653 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2654 << " abstract attributes.\n";
2657 unsigned NumManifested = 0;
2658 unsigned NumAtFixpoint = 0;
2659 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2660 for (AbstractAttribute *AA : AllAbstractAttributes) {
2661 AbstractState &State = AA->getState();
2663 // If a fixpoint was not already reached, we can now take the
2664 // optimistic state. This is correct because we enforced a pessimistic one
2665 // on abstract attributes that were transitively dependent on a changed one
2666 // already above.
2667 if (!State.isAtFixpoint())
2668 State.indicateOptimisticFixpoint();
2670 // If the state is invalid, we do not try to manifest it.
2671 if (!State.isValidState())
2672 continue;
2674 // Skip dead code.
2675 if (isAssumedDead(*AA, nullptr))
2676 continue;
2677 // Manifest the state and record if we changed the IR.
2678 ChangeStatus LocalChange = AA->manifest(*this);
2679 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2680 AA->trackStatistics();
2682 ManifestChange = ManifestChange | LocalChange;
2684 NumAtFixpoint++;
2685 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2688 (void)NumManifested;
2689 (void)NumAtFixpoint;
2690 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2691 << " arguments while " << NumAtFixpoint
2692 << " were in a valid fixpoint state\n");
2694 // If verification is requested, we finished this run at a fixpoint, and the
2695 // IR was changed, we re-run the whole fixpoint analysis, starting at
2696 // re-initialization of the arguments. This re-run should not result in an IR
2697 // change. Though, the (virtual) state of attributes at the end of the re-run
2698 // might be more optimistic than the known state or the IR state if the better
2699 // state cannot be manifested.
2700 if (VerifyAttributor && FinishedAtFixpoint &&
2701 ManifestChange == ChangeStatus::CHANGED) {
2702 VerifyAttributor = false;
2703 ChangeStatus VerifyStatus = run();
2704 if (VerifyStatus != ChangeStatus::UNCHANGED)
2705 llvm_unreachable(
2706 "Attributor verification failed, re-run did result in an IR change "
2707 "even after a fixpoint was reached in the original run. (False "
2708 "positives possible!)");
2709 VerifyAttributor = true;
2712 NumAttributesManifested += NumManifested;
2713 NumAttributesValidFixpoint += NumAtFixpoint;
2715 (void)NumFinalAAs;
2716 assert(
2717 NumFinalAAs == AllAbstractAttributes.size() &&
2718 "Expected the final number of abstract attributes to remain unchanged!");
2720 // Delete everything at the end to avoid invalid references and to get a nice order.
2721 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete " << ToBeDeletedFunctions.size()
2722 << " functions and " << ToBeDeletedBlocks.size()
2723 << " blocks and " << ToBeDeletedInsts.size()
2724 << " instructions\n");
2725 for (Instruction *I : ToBeDeletedInsts) {
2726 if (I->hasNUsesOrMore(1))
2727 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2728 I->eraseFromParent();
2730 for (BasicBlock *BB : ToBeDeletedBlocks) {
2731 // TODO: Check if we need to replace users (PHIs, indirect branches?)
2732 BB->eraseFromParent();
2734 for (Function *Fn : ToBeDeletedFunctions) {
2735 Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
2736 Fn->eraseFromParent();
2739 return ManifestChange;
2742 /// Helper function that checks if an abstract attribute of type \p AAType
2743 /// should be created for IR position \p IRP and if so creates and registers it
2744 /// with the Attributor \p A.
2746 /// This method will look at the provided whitelist. If one is given and the
2747 /// kind \p AAType::ID is not contained, no abstract attribute is created.
2749 /// \returns The created abstract attribute, or nullptr if none was created.
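/// For example, checkAndRegisterAA<AAWillReturnFunction>(FPos, A, Whitelist)
/// creates and registers the willreturn attribute for a function position,
/// unless a whitelist is given that does not contain AAWillReturn::ID.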
2750 template <typename AAType>
2751 static const AAType *checkAndRegisterAA(const IRPosition &IRP, Attributor &A,
2752 DenseSet<const char *> *Whitelist) {
2753 if (Whitelist && !Whitelist->count(&AAType::ID))
2754 return nullptr;
2756 return &A.registerAA<AAType>(*new AAType(IRP));
2759 void Attributor::identifyDefaultAbstractAttributes(
2760 Function &F, DenseSet<const char *> *Whitelist) {
2762 IRPosition FPos = IRPosition::function(F);
2764 // Check for dead BasicBlocks in every function.
2765 // We need dead instruction detection because we do not want to deal with
2766 // broken IR in which SSA rules do not apply.
2767 checkAndRegisterAA<AAIsDeadFunction>(FPos, *this, /* Whitelist */ nullptr);
2769 // Every function might be "will-return".
2770 checkAndRegisterAA<AAWillReturnFunction>(FPos, *this, Whitelist);
2772 // Every function can be nounwind.
2773 checkAndRegisterAA<AANoUnwindFunction>(FPos, *this, Whitelist);
2775 // Every function might be marked "nosync"
2776 checkAndRegisterAA<AANoSyncFunction>(FPos, *this, Whitelist);
2778 // Every function might be "no-free".
2779 checkAndRegisterAA<AANoFreeFunction>(FPos, *this, Whitelist);
2781 // Every function might be "no-return".
2782 checkAndRegisterAA<AANoReturnFunction>(FPos, *this, Whitelist);
2784 // Return attributes are only appropriate if the return type is non-void.
2785 Type *ReturnType = F.getReturnType();
2786 if (!ReturnType->isVoidTy()) {
2787 // Argument attribute "returned" --- Create only one per function even
2788 // though it is an argument attribute.
2789 checkAndRegisterAA<AAReturnedValuesFunction>(FPos, *this, Whitelist);
2791 if (ReturnType->isPointerTy()) {
2792 IRPosition RetPos = IRPosition::returned(F);
2794 // Every function with pointer return type might be marked align.
2795 checkAndRegisterAA<AAAlignReturned>(RetPos, *this, Whitelist);
2797 // Every function with pointer return type might be marked nonnull.
2798 checkAndRegisterAA<AANonNullReturned>(RetPos, *this, Whitelist);
2800 // Every function with pointer return type might be marked noalias.
2801 checkAndRegisterAA<AANoAliasReturned>(RetPos, *this, Whitelist);
2803 // Every function with pointer return type might be marked
2804 // dereferenceable.
2805 checkAndRegisterAA<AADereferenceableReturned>(RetPos, *this, Whitelist);
2809 for (Argument &Arg : F.args()) {
2810 if (Arg.getType()->isPointerTy()) {
2811 IRPosition ArgPos = IRPosition::argument(Arg);
2812 // Every argument with pointer type might be marked nonnull.
2813 checkAndRegisterAA<AANonNullArgument>(ArgPos, *this, Whitelist);
2815 // Every argument with pointer type might be marked dereferenceable.
2816 checkAndRegisterAA<AADereferenceableArgument>(ArgPos, *this, Whitelist);
2818 // Every argument with pointer type might be marked align.
2819 checkAndRegisterAA<AAAlignArgument>(ArgPos, *this, Whitelist);
2823 // Walk all instructions to find more attribute opportunities and also
2824 // interesting instructions that might be queried by abstract attributes
2825 // during their initialization or update.
2826 auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
2827 auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];
2829 for (Instruction &I : instructions(&F)) {
2830 bool IsInterestingOpcode = false;
2832 // To allow easy access to all instructions in a function with a given
2833 // opcode we store them in the InfoCache. As not all opcodes are interesting
2834 // to concrete attributes we only cache the ones that are as identified in
2835 // the following switch.
2836 // Note: Only the opcodes handled below are currently of interest to any attribute.
2837 switch (I.getOpcode()) {
2838 default:
2839 assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
2840 "New call site/base instruction type needs to be known int the "
2841 "attributor.");
2842 break;
2843 case Instruction::Load:
2844 // The alignment of a pointer is interesting for loads.
2845 checkAndRegisterAA<AAAlignFloating>(
2846 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()), *this,
2847 Whitelist);
2848 break;
2849 case Instruction::Store:
2850 // The alignment of a pointer is interesting for stores.
2851 checkAndRegisterAA<AAAlignFloating>(
2852 IRPosition::value(*cast<StoreInst>(I).getPointerOperand()), *this,
2853 Whitelist);
2854 break;
2855 case Instruction::Call:
2856 case Instruction::CallBr:
2857 case Instruction::Invoke:
2858 case Instruction::CleanupRet:
2859 case Instruction::CatchSwitch:
2860 case Instruction::Resume:
2861 case Instruction::Ret:
2862 IsInterestingOpcode = true;
2864 if (IsInterestingOpcode)
2865 InstOpcodeMap[I.getOpcode()].push_back(&I);
2866 if (I.mayReadOrWriteMemory())
2867 ReadOrWriteInsts.push_back(&I);
2869 CallSite CS(&I);
2870 if (CS && CS.getCalledFunction()) {
2871 for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
2872 if (!CS.getArgument(i)->getType()->isPointerTy())
2873 continue;
2874 IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);
2876 // Call site argument attribute "non-null".
2877 checkAndRegisterAA<AANonNullCallSiteArgument>(CSArgPos, *this,
2878 Whitelist);
2880 // Call site argument attribute "dereferenceable".
2881 checkAndRegisterAA<AADereferenceableCallSiteArgument>(CSArgPos, *this,
2882 Whitelist);
2884 // Call site argument attribute "align".
2885 checkAndRegisterAA<AAAlignCallSiteArgument>(CSArgPos, *this, Whitelist);
2891 /// Helpers to ease debugging through output streams and print calls.
2893 ///{
2894 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
2895 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
2898 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
2899 switch (AP) {
2900 case IRPosition::IRP_INVALID:
2901 return OS << "inv";
2902 case IRPosition::IRP_FLOAT:
2903 return OS << "flt";
2904 case IRPosition::IRP_RETURNED:
2905 return OS << "fn_ret";
2906 case IRPosition::IRP_CALL_SITE_RETURNED:
2907 return OS << "cs_ret";
2908 case IRPosition::IRP_FUNCTION:
2909 return OS << "fn";
2910 case IRPosition::IRP_CALL_SITE:
2911 return OS << "cs";
2912 case IRPosition::IRP_ARGUMENT:
2913 return OS << "arg";
2914 case IRPosition::IRP_CALL_SITE_ARGUMENT:
2915 return OS << "cs_arg";
2917 llvm_unreachable("Unknown attribute position!");
2920 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
2921 const Value &AV = Pos.getAssociatedValue();
2922 return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
2923 << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
2926 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerState &S) {
2927 return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
2928 << static_cast<const AbstractState &>(S);
2931 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
2932 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
2935 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
2936 AA.print(OS);
2937 return OS;
2940 void AbstractAttribute::print(raw_ostream &OS) const {
2941 OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
2942 << "]";
2944 ///}
2946 /// ----------------------------------------------------------------------------
2947 /// Pass (Manager) Boilerplate
2948 /// ----------------------------------------------------------------------------
2950 static bool runAttributorOnModule(Module &M) {
2951 if (DisableAttributor)
2952 return false;
2954 LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
2955 << " functions.\n");
2957 // Create an Attributor and an initially empty information cache that is filled
2958 // while we identify default attribute opportunities.
2959 InformationCache InfoCache(M.getDataLayout());
2960 Attributor A(InfoCache, DepRecInterval);
2962 for (Function &F : M) {
2963 // TODO: Not all attributes require an exact definition. Find a way to
2964 // enable deduction for some but not all attributes in case the
2965 // definition might be changed at runtime, see also
2966 // http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html.
2967 // TODO: We could always determine abstract attributes and if sufficient
2968 // information was found we could duplicate the functions that do not
2969 // have an exact definition.
2970 if (!F.hasExactDefinition()) {
2971 NumFnWithoutExactDefinition++;
2972 continue;
2975 // For now we ignore naked and optnone functions.
2976 if (F.hasFnAttribute(Attribute::Naked) ||
2977 F.hasFnAttribute(Attribute::OptimizeNone))
2978 continue;
2980 NumFnWithExactDefinition++;
2982 // Populate the Attributor with abstract attribute opportunities in the
2983 // function and the information cache with IR information.
2984 A.identifyDefaultAbstractAttributes(F);
2987 return A.run() == ChangeStatus::CHANGED;
2990 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
2991 if (runAttributorOnModule(M)) {
2992 // FIXME: Think about passes we will preserve and add them here.
2993 return PreservedAnalyses::none();
2995 return PreservedAnalyses::all();
2998 namespace {
3000 struct AttributorLegacyPass : public ModulePass {
3001 static char ID;
3003 AttributorLegacyPass() : ModulePass(ID) {
3004 initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
3007 bool runOnModule(Module &M) override {
3008 if (skipModule(M))
3009 return false;
3010 return runAttributorOnModule(M);
3013 void getAnalysisUsage(AnalysisUsage &AU) const override {
3014 // FIXME: Think about passes we will preserve and add them here.
3015 AU.setPreservesCFG();
3019 } // end anonymous namespace
3021 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
3023 char AttributorLegacyPass::ID = 0;
3025 const char AAReturnedValues::ID = 0;
3026 const char AANoUnwind::ID = 0;
3027 const char AANoSync::ID = 0;
3028 const char AANoFree::ID = 0;
3029 const char AANonNull::ID = 0;
3030 const char AANoRecurse::ID = 0;
3031 const char AAWillReturn::ID = 0;
3032 const char AANoAlias::ID = 0;
3033 const char AANoReturn::ID = 0;
3034 const char AAIsDead::ID = 0;
3035 const char AADereferenceable::ID = 0;
3036 const char AAAlign::ID = 0;
3038 // Macro magic to create the static generator function for attributes that
3039 // follow the naming scheme.
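// For example, CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// defines AANoUnwind::createForPosition, which instantiates either an
// AANoUnwindFunction or an AANoUnwindCallSite depending on the position
// kind and rejects all value positions via llvm_unreachable.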
3041 #define SWITCH_PK_INV(CLASS, PK, POS_NAME) \
3042 case IRPosition::PK: \
3043 llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");
3045 #define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX) \
3046 case IRPosition::PK: \
3047 AA = new CLASS##SUFFIX(IRP); \
3048 break;
3050 #define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
3051 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
3052 CLASS *AA = nullptr; \
3053 switch (IRP.getPositionKind()) { \
3054 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
3055 SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating") \
3056 SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument") \
3057 SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned") \
3058 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned") \
3059 SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument") \
3060 SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function) \
3061 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite) \
3063 AA->initialize(A); \
3064 return *AA; \
3067 #define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS) \
3068 CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) { \
3069 CLASS *AA = nullptr; \
3070 switch (IRP.getPositionKind()) { \
3071 SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid") \
3072 SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function") \
3073 SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site") \
3074 SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating) \
3075 SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument) \
3076 SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned) \
3077 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned) \
3078 SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument) \
3080 AA->initialize(A); \
3081 return *AA; \
3084 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
3085 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
3086 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
3087 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
3088 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
3089 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
3090 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
3091 CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)
3093 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
3094 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
3095 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
3096 CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
3098 #undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
3099 #undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
3100 #undef SWITCH_PK_CREATE
3101 #undef SWITCH_PK_INV
3103 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
3104 "Deduce and propagate attributes", false, false)
3105 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
3106 "Deduce and propagate attributes", false, false)