1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
9 // This file implements an inter procedural pass that deduces and/or propagating
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
14 //===----------------------------------------------------------------------===//
16 #include "llvm/Transforms/IPO/Attributor.h"
18 #include "llvm/ADT/DepthFirstIterator.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/SmallPtrSet.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/ADT/Statistic.h"
23 #include "llvm/Analysis/CaptureTracking.h"
24 #include "llvm/Analysis/EHPersonalities.h"
25 #include "llvm/Analysis/GlobalsModRef.h"
26 #include "llvm/Analysis/Loads.h"
27 #include "llvm/Analysis/ValueTracking.h"
28 #include "llvm/IR/Argument.h"
29 #include "llvm/IR/Attributes.h"
30 #include "llvm/IR/CFG.h"
31 #include "llvm/IR/InstIterator.h"
32 #include "llvm/IR/IntrinsicInst.h"
33 #include "llvm/Support/CommandLine.h"
34 #include "llvm/Support/Debug.h"
35 #include "llvm/Support/raw_ostream.h"
36 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
37 #include "llvm/Transforms/Utils/Local.h"
43 #define DEBUG_TYPE "attributor"
45 STATISTIC(NumFnWithExactDefinition
,
46 "Number of function with exact definitions");
47 STATISTIC(NumFnWithoutExactDefinition
,
48 "Number of function without exact definitions");
49 STATISTIC(NumAttributesTimedOut
,
50 "Number of abstract attributes timed out before fixpoint");
51 STATISTIC(NumAttributesValidFixpoint
,
52 "Number of abstract attributes in a valid fixpoint state");
53 STATISTIC(NumAttributesManifested
,
54 "Number of abstract attributes manifested in IR");
56 // Some helper macros to deal with statistics tracking.
59 // For simple IR attribute tracking overload trackStatistics in the abstract
60 // attribute and choose the right STATS_DECLTRACK_********* macro,
62 // void trackStatistics() const override {
63 // STATS_DECLTRACK_ARG_ATTR(returned)
65 // If there is a single "increment" side one can use the macro
66 // STATS_DECLTRACK with a custom message. If there are multiple increment
67 // sides, STATS_DECL and STATS_TRACK can also be used separatly.
69 #define BUILD_STAT_MSG_IR_ATTR(TYPE, NAME) \
70 ("Number of " #TYPE " marked '" #NAME "'")
71 #define BUILD_STAT_NAME(NAME, TYPE) NumIR##TYPE##_##NAME
72 #define STATS_DECL_(NAME, MSG) STATISTIC(NAME, MSG);
73 #define STATS_DECL(NAME, TYPE, MSG) \
74 STATS_DECL_(BUILD_STAT_NAME(NAME, TYPE), MSG);
75 #define STATS_TRACK(NAME, TYPE) ++(BUILD_STAT_NAME(NAME, TYPE));
76 #define STATS_DECLTRACK(NAME, TYPE, MSG) \
78 STATS_DECL(NAME, TYPE, MSG) \
79 STATS_TRACK(NAME, TYPE) \
81 #define STATS_DECLTRACK_ARG_ATTR(NAME) \
82 STATS_DECLTRACK(NAME, Arguments, BUILD_STAT_MSG_IR_ATTR(arguments, NAME))
83 #define STATS_DECLTRACK_CSARG_ATTR(NAME) \
84 STATS_DECLTRACK(NAME, CSArguments, \
85 BUILD_STAT_MSG_IR_ATTR(call site arguments, NAME))
86 #define STATS_DECLTRACK_FN_ATTR(NAME) \
87 STATS_DECLTRACK(NAME, Function, BUILD_STAT_MSG_IR_ATTR(functions, NAME))
88 #define STATS_DECLTRACK_CS_ATTR(NAME) \
89 STATS_DECLTRACK(NAME, CS, BUILD_STAT_MSG_IR_ATTR(call site, NAME))
90 #define STATS_DECLTRACK_FNRET_ATTR(NAME) \
91 STATS_DECLTRACK(NAME, FunctionReturn, \
92 BUILD_STAT_MSG_IR_ATTR(function returns, NAME))
93 #define STATS_DECLTRACK_CSRET_ATTR(NAME) \
94 STATS_DECLTRACK(NAME, CSReturn, \
95 BUILD_STAT_MSG_IR_ATTR(call site returns, NAME))
96 #define STATS_DECLTRACK_FLOATING_ATTR(NAME) \
97 STATS_DECLTRACK(NAME, Floating, \
98 ("Number of floating values known to be '" #NAME "'"))
100 // TODO: Determine a good default value.
102 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
103 // (when run with the first 5 abstract attributes). The results also indicate
104 // that we never reach 32 iterations but always find a fixpoint sooner.
106 // This will become more evolved once we perform two interleaved fixpoint
107 // iterations: bottom-up and top-down.
108 static cl::opt
<unsigned>
109 MaxFixpointIterations("attributor-max-iterations", cl::Hidden
,
110 cl::desc("Maximal number of fixpoint iterations."),
112 static cl::opt
<bool> VerifyMaxFixpointIterations(
113 "attributor-max-iterations-verify", cl::Hidden
,
114 cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
117 static cl::opt
<bool> DisableAttributor(
118 "attributor-disable", cl::Hidden
,
119 cl::desc("Disable the attributor inter-procedural deduction pass."),
122 static cl::opt
<bool> ManifestInternal(
123 "attributor-manifest-internal", cl::Hidden
,
124 cl::desc("Manifest Attributor internal string attributes."),
127 static cl::opt
<bool> VerifyAttributor(
128 "attributor-verify", cl::Hidden
,
129 cl::desc("Verify the Attributor deduction and "
130 "manifestation of attributes -- may issue false-positive errors"),
133 static cl::opt
<unsigned> DepRecInterval(
134 "attributor-dependence-recompute-interval", cl::Hidden
,
135 cl::desc("Number of iterations until dependences are recomputed."),
138 /// Logic operators for the change status enum class.
141 ChangeStatus
llvm::operator|(ChangeStatus l
, ChangeStatus r
) {
142 return l
== ChangeStatus::CHANGED
? l
: r
;
144 ChangeStatus
llvm::operator&(ChangeStatus l
, ChangeStatus r
) {
145 return l
== ChangeStatus::UNCHANGED
? l
: r
;
149 /// Recursively visit all values that might become \p IRP at some point. This
150 /// will be done by looking through cast instructions, selects, phis, and calls
151 /// with the "returned" attribute. Once we cannot look through the value any
152 /// further, the callback \p VisitValueCB is invoked and passed the current
153 /// value, the \p State, and a flag to indicate if we stripped anything. To
154 /// limit how much effort is invested, we will never visit more values than
155 /// specified by \p MaxValues.
156 template <typename AAType
, typename StateTy
>
157 bool genericValueTraversal(
158 Attributor
&A
, IRPosition IRP
, const AAType
&QueryingAA
, StateTy
&State
,
159 const function_ref
<bool(Value
&, StateTy
&, bool)> &VisitValueCB
,
162 const AAIsDead
*LivenessAA
= nullptr;
163 if (IRP
.getAnchorScope())
164 LivenessAA
= &A
.getAAFor
<AAIsDead
>(
165 QueryingAA
, IRPosition::function(*IRP
.getAnchorScope()),
166 /* TrackDependence */ false);
167 bool AnyDead
= false;
169 // TODO: Use Positions here to allow context sensitivity in VisitValueCB
170 SmallPtrSet
<Value
*, 16> Visited
;
171 SmallVector
<Value
*, 16> Worklist
;
172 Worklist
.push_back(&IRP
.getAssociatedValue());
176 Value
*V
= Worklist
.pop_back_val();
178 // Check if we should process the current value. To prevent endless
179 // recursion keep a record of the values we followed!
180 if (!Visited
.insert(V
).second
)
183 // Make sure we limit the compile time for complex expressions.
184 if (Iteration
++ >= MaxValues
)
187 // Explicitly look through calls with a "returned" attribute if we do
188 // not have a pointer as stripPointerCasts only works on them.
189 Value
*NewV
= nullptr;
190 if (V
->getType()->isPointerTy()) {
191 NewV
= V
->stripPointerCasts();
194 if (CS
&& CS
.getCalledFunction()) {
195 for (Argument
&Arg
: CS
.getCalledFunction()->args())
196 if (Arg
.hasReturnedAttr()) {
197 NewV
= CS
.getArgOperand(Arg
.getArgNo());
202 if (NewV
&& NewV
!= V
) {
203 Worklist
.push_back(NewV
);
207 // Look through select instructions, visit both potential values.
208 if (auto *SI
= dyn_cast
<SelectInst
>(V
)) {
209 Worklist
.push_back(SI
->getTrueValue());
210 Worklist
.push_back(SI
->getFalseValue());
214 // Look through phi nodes, visit all live operands.
215 if (auto *PHI
= dyn_cast
<PHINode
>(V
)) {
217 "Expected liveness in the presence of instructions!");
218 for (unsigned u
= 0, e
= PHI
->getNumIncomingValues(); u
< e
; u
++) {
219 const BasicBlock
*IncomingBB
= PHI
->getIncomingBlock(u
);
220 if (LivenessAA
->isAssumedDead(IncomingBB
->getTerminator())) {
224 Worklist
.push_back(PHI
->getIncomingValue(u
));
229 // Once a leaf is reached we inform the user through the callback.
230 if (!VisitValueCB(*V
, State
, Iteration
> 1))
232 } while (!Worklist
.empty());
234 // If we actually used liveness information so we have to record a dependence.
236 A
.recordDependence(*LivenessAA
, QueryingAA
);
238 // All values have been visited.
242 /// Return true if \p New is equal or worse than \p Old.
243 static bool isEqualOrWorse(const Attribute
&New
, const Attribute
&Old
) {
244 if (!Old
.isIntAttribute())
247 return Old
.getValueAsInt() >= New
.getValueAsInt();
250 /// Return true if the information provided by \p Attr was added to the
251 /// attribute list \p Attrs. This is only the case if it was not already present
252 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
253 static bool addIfNotExistent(LLVMContext
&Ctx
, const Attribute
&Attr
,
254 AttributeList
&Attrs
, int AttrIdx
) {
256 if (Attr
.isEnumAttribute()) {
257 Attribute::AttrKind Kind
= Attr
.getKindAsEnum();
258 if (Attrs
.hasAttribute(AttrIdx
, Kind
))
259 if (isEqualOrWorse(Attr
, Attrs
.getAttribute(AttrIdx
, Kind
)))
261 Attrs
= Attrs
.addAttribute(Ctx
, AttrIdx
, Attr
);
264 if (Attr
.isStringAttribute()) {
265 StringRef Kind
= Attr
.getKindAsString();
266 if (Attrs
.hasAttribute(AttrIdx
, Kind
))
267 if (isEqualOrWorse(Attr
, Attrs
.getAttribute(AttrIdx
, Kind
)))
269 Attrs
= Attrs
.addAttribute(Ctx
, AttrIdx
, Attr
);
272 if (Attr
.isIntAttribute()) {
273 Attribute::AttrKind Kind
= Attr
.getKindAsEnum();
274 if (Attrs
.hasAttribute(AttrIdx
, Kind
))
275 if (isEqualOrWorse(Attr
, Attrs
.getAttribute(AttrIdx
, Kind
)))
277 Attrs
= Attrs
.removeAttribute(Ctx
, AttrIdx
, Kind
);
278 Attrs
= Attrs
.addAttribute(Ctx
, AttrIdx
, Attr
);
282 llvm_unreachable("Expected enum or string attribute!");
285 ChangeStatus
AbstractAttribute::update(Attributor
&A
) {
286 ChangeStatus HasChanged
= ChangeStatus::UNCHANGED
;
287 if (getState().isAtFixpoint())
290 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
292 HasChanged
= updateImpl(A
);
294 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged
<< " " << *this
301 IRAttributeManifest::manifestAttrs(Attributor
&A
, IRPosition
&IRP
,
302 const ArrayRef
<Attribute
> &DeducedAttrs
) {
303 Function
*ScopeFn
= IRP
.getAssociatedFunction();
304 IRPosition::Kind PK
= IRP
.getPositionKind();
306 // In the following some generic code that will manifest attributes in
307 // DeducedAttrs if they improve the current IR. Due to the different
308 // annotation positions we use the underlying AttributeList interface.
312 case IRPosition::IRP_INVALID
:
313 case IRPosition::IRP_FLOAT
:
314 return ChangeStatus::UNCHANGED
;
315 case IRPosition::IRP_ARGUMENT
:
316 case IRPosition::IRP_FUNCTION
:
317 case IRPosition::IRP_RETURNED
:
318 Attrs
= ScopeFn
->getAttributes();
320 case IRPosition::IRP_CALL_SITE
:
321 case IRPosition::IRP_CALL_SITE_RETURNED
:
322 case IRPosition::IRP_CALL_SITE_ARGUMENT
:
323 Attrs
= ImmutableCallSite(&IRP
.getAnchorValue()).getAttributes();
327 ChangeStatus HasChanged
= ChangeStatus::UNCHANGED
;
328 LLVMContext
&Ctx
= IRP
.getAnchorValue().getContext();
329 for (const Attribute
&Attr
: DeducedAttrs
) {
330 if (!addIfNotExistent(Ctx
, Attr
, Attrs
, IRP
.getAttrIdx()))
333 HasChanged
= ChangeStatus::CHANGED
;
336 if (HasChanged
== ChangeStatus::UNCHANGED
)
340 case IRPosition::IRP_ARGUMENT
:
341 case IRPosition::IRP_FUNCTION
:
342 case IRPosition::IRP_RETURNED
:
343 ScopeFn
->setAttributes(Attrs
);
345 case IRPosition::IRP_CALL_SITE
:
346 case IRPosition::IRP_CALL_SITE_RETURNED
:
347 case IRPosition::IRP_CALL_SITE_ARGUMENT
:
348 CallSite(&IRP
.getAnchorValue()).setAttributes(Attrs
);
350 case IRPosition::IRP_INVALID
:
351 case IRPosition::IRP_FLOAT
:
358 const IRPosition
IRPosition::EmptyKey(255);
359 const IRPosition
IRPosition::TombstoneKey(256);
361 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition
&IRP
) {
362 IRPositions
.emplace_back(IRP
);
364 ImmutableCallSite
ICS(&IRP
.getAnchorValue());
365 switch (IRP
.getPositionKind()) {
366 case IRPosition::IRP_INVALID
:
367 case IRPosition::IRP_FLOAT
:
368 case IRPosition::IRP_FUNCTION
:
370 case IRPosition::IRP_ARGUMENT
:
371 case IRPosition::IRP_RETURNED
:
372 IRPositions
.emplace_back(
373 IRPosition::function(*IRP
.getAssociatedFunction()));
375 case IRPosition::IRP_CALL_SITE
:
376 assert(ICS
&& "Expected call site!");
377 // TODO: We need to look at the operand bundles similar to the redirection
379 if (!ICS
.hasOperandBundles())
380 if (const Function
*Callee
= ICS
.getCalledFunction())
381 IRPositions
.emplace_back(IRPosition::function(*Callee
));
383 case IRPosition::IRP_CALL_SITE_RETURNED
:
384 assert(ICS
&& "Expected call site!");
385 // TODO: We need to look at the operand bundles similar to the redirection
387 if (!ICS
.hasOperandBundles()) {
388 if (const Function
*Callee
= ICS
.getCalledFunction()) {
389 IRPositions
.emplace_back(IRPosition::returned(*Callee
));
390 IRPositions
.emplace_back(IRPosition::function(*Callee
));
393 IRPositions
.emplace_back(
394 IRPosition::callsite_function(cast
<CallBase
>(*ICS
.getInstruction())));
396 case IRPosition::IRP_CALL_SITE_ARGUMENT
: {
397 int ArgNo
= IRP
.getArgNo();
398 assert(ICS
&& ArgNo
>= 0 && "Expected call site!");
399 // TODO: We need to look at the operand bundles similar to the redirection
401 if (!ICS
.hasOperandBundles()) {
402 const Function
*Callee
= ICS
.getCalledFunction();
403 if (Callee
&& Callee
->arg_size() > unsigned(ArgNo
))
404 IRPositions
.emplace_back(IRPosition::argument(*Callee
->getArg(ArgNo
)));
406 IRPositions
.emplace_back(IRPosition::function(*Callee
));
408 IRPositions
.emplace_back(IRPosition::value(IRP
.getAssociatedValue()));
414 bool IRPosition::hasAttr(ArrayRef
<Attribute::AttrKind
> AKs
) const {
415 for (const IRPosition
&EquivIRP
: SubsumingPositionIterator(*this))
416 for (Attribute::AttrKind AK
: AKs
)
417 if (EquivIRP
.getAttr(AK
).getKindAsEnum() == AK
)
422 void IRPosition::getAttrs(ArrayRef
<Attribute::AttrKind
> AKs
,
423 SmallVectorImpl
<Attribute
> &Attrs
) const {
424 for (const IRPosition
&EquivIRP
: SubsumingPositionIterator(*this))
425 for (Attribute::AttrKind AK
: AKs
) {
426 const Attribute
&Attr
= EquivIRP
.getAttr(AK
);
427 if (Attr
.getKindAsEnum() == AK
)
428 Attrs
.push_back(Attr
);
432 void IRPosition::verify() {
433 switch (KindOrArgNo
) {
435 assert(KindOrArgNo
>= 0 && "Expected argument or call site argument!");
436 assert((isa
<CallBase
>(AnchorVal
) || isa
<Argument
>(AnchorVal
)) &&
437 "Expected call base or argument for positive attribute index!");
438 if (isa
<Argument
>(AnchorVal
)) {
439 assert(cast
<Argument
>(AnchorVal
)->getArgNo() == unsigned(getArgNo()) &&
440 "Argument number mismatch!");
441 assert(cast
<Argument
>(AnchorVal
) == &getAssociatedValue() &&
442 "Associated value mismatch!");
444 assert(cast
<CallBase
>(*AnchorVal
).arg_size() > unsigned(getArgNo()) &&
445 "Call site argument number mismatch!");
446 assert(cast
<CallBase
>(*AnchorVal
).getArgOperand(getArgNo()) ==
447 &getAssociatedValue() &&
448 "Associated value mismatch!");
452 assert(!AnchorVal
&& "Expected no value for an invalid position!");
455 assert((!isa
<CallBase
>(&getAssociatedValue()) &&
456 !isa
<Argument
>(&getAssociatedValue())) &&
457 "Expected specialized kind for call base and argument values!");
460 assert(isa
<Function
>(AnchorVal
) &&
461 "Expected function for a 'returned' position!");
462 assert(AnchorVal
== &getAssociatedValue() && "Associated value mismatch!");
464 case IRP_CALL_SITE_RETURNED
:
465 assert((isa
<CallBase
>(AnchorVal
)) &&
466 "Expected call base for 'call site returned' position!");
467 assert(AnchorVal
== &getAssociatedValue() && "Associated value mismatch!");
470 assert((isa
<CallBase
>(AnchorVal
)) &&
471 "Expected call base for 'call site function' position!");
472 assert(AnchorVal
== &getAssociatedValue() && "Associated value mismatch!");
475 assert(isa
<Function
>(AnchorVal
) &&
476 "Expected function for a 'function' position!");
477 assert(AnchorVal
== &getAssociatedValue() && "Associated value mismatch!");
482 /// Helper functions to clamp a state \p S of type \p StateType with the
483 /// information in \p R and indicate/return if \p S did change (as-in update is
484 /// required to be run again).
487 template <typename StateType
>
488 ChangeStatus
clampStateAndIndicateChange(StateType
&S
, const StateType
&R
);
491 ChangeStatus clampStateAndIndicateChange
<IntegerState
>(IntegerState
&S
,
492 const IntegerState
&R
) {
493 auto Assumed
= S
.getAssumed();
495 return Assumed
== S
.getAssumed() ? ChangeStatus::UNCHANGED
496 : ChangeStatus::CHANGED
;
500 ChangeStatus clampStateAndIndicateChange
<BooleanState
>(BooleanState
&S
,
501 const BooleanState
&R
) {
502 return clampStateAndIndicateChange
<IntegerState
>(S
, R
);
506 /// Clamp the information known for all returned values of a function
507 /// (identified by \p QueryingAA) into \p S.
508 template <typename AAType
, typename StateType
= typename
AAType::StateType
>
509 static void clampReturnedValueStates(Attributor
&A
, const AAType
&QueryingAA
,
511 LLVM_DEBUG(dbgs() << "[Attributor] Clamp return value states for "
512 << static_cast<const AbstractAttribute
&>(QueryingAA
)
513 << " into " << S
<< "\n");
515 assert((QueryingAA
.getIRPosition().getPositionKind() ==
516 IRPosition::IRP_RETURNED
||
517 QueryingAA
.getIRPosition().getPositionKind() ==
518 IRPosition::IRP_CALL_SITE_RETURNED
) &&
519 "Can only clamp returned value states for a function returned or call "
520 "site returned position!");
522 // Use an optional state as there might not be any return values and we want
523 // to join (IntegerState::operator&) the state of all there are.
524 Optional
<StateType
> T
;
526 // Callback for each possibly returned value.
527 auto CheckReturnValue
= [&](Value
&RV
) -> bool {
528 const IRPosition
&RVPos
= IRPosition::value(RV
);
529 const AAType
&AA
= A
.getAAFor
<AAType
>(QueryingAA
, RVPos
);
530 LLVM_DEBUG(dbgs() << "[Attributor] RV: " << RV
<< " AA: " << AA
.getAsStr()
531 << " @ " << RVPos
<< "\n");
532 const StateType
&AAS
= static_cast<const StateType
&>(AA
.getState());
537 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS
<< " RV State: " << T
539 return T
->isValidState();
542 if (!A
.checkForAllReturnedValues(CheckReturnValue
, QueryingAA
))
543 S
.indicatePessimisticFixpoint();
544 else if (T
.hasValue())
548 /// Helper class for generic deduction: return value -> returned position.
549 template <typename AAType
, typename Base
,
550 typename StateType
= typename
AAType::StateType
>
551 struct AAReturnedFromReturnedValues
: public Base
{
552 AAReturnedFromReturnedValues(const IRPosition
&IRP
) : Base(IRP
) {}
554 /// See AbstractAttribute::updateImpl(...).
555 ChangeStatus
updateImpl(Attributor
&A
) override
{
557 clampReturnedValueStates
<AAType
, StateType
>(A
, *this, S
);
558 // TODO: If we know we visited all returned values, thus no are assumed
559 // dead, we can take the known information from the state T.
560 return clampStateAndIndicateChange
<StateType
>(this->getState(), S
);
564 /// Clamp the information known at all call sites for a given argument
565 /// (identified by \p QueryingAA) into \p S.
566 template <typename AAType
, typename StateType
= typename
AAType::StateType
>
567 static void clampCallSiteArgumentStates(Attributor
&A
, const AAType
&QueryingAA
,
569 LLVM_DEBUG(dbgs() << "[Attributor] Clamp call site argument states for "
570 << static_cast<const AbstractAttribute
&>(QueryingAA
)
571 << " into " << S
<< "\n");
573 assert(QueryingAA
.getIRPosition().getPositionKind() ==
574 IRPosition::IRP_ARGUMENT
&&
575 "Can only clamp call site argument states for an argument position!");
577 // Use an optional state as there might not be any return values and we want
578 // to join (IntegerState::operator&) the state of all there are.
579 Optional
<StateType
> T
;
581 // The argument number which is also the call site argument number.
582 unsigned ArgNo
= QueryingAA
.getIRPosition().getArgNo();
584 auto CallSiteCheck
= [&](CallSite CS
) {
585 const IRPosition
&CSArgPos
= IRPosition::callsite_argument(CS
, ArgNo
);
586 const AAType
&AA
= A
.getAAFor
<AAType
>(QueryingAA
, CSArgPos
);
587 LLVM_DEBUG(dbgs() << "[Attributor] CS: " << *CS
.getInstruction()
588 << " AA: " << AA
.getAsStr() << " @" << CSArgPos
<< "\n");
589 const StateType
&AAS
= static_cast<const StateType
&>(AA
.getState());
594 LLVM_DEBUG(dbgs() << "[Attributor] AA State: " << AAS
<< " CSA State: " << T
596 return T
->isValidState();
599 if (!A
.checkForAllCallSites(CallSiteCheck
, QueryingAA
, true))
600 S
.indicatePessimisticFixpoint();
601 else if (T
.hasValue())
605 /// Helper class for generic deduction: call site argument -> argument position.
606 template <typename AAType
, typename Base
,
607 typename StateType
= typename
AAType::StateType
>
608 struct AAArgumentFromCallSiteArguments
: public Base
{
609 AAArgumentFromCallSiteArguments(const IRPosition
&IRP
) : Base(IRP
) {}
611 /// See AbstractAttribute::updateImpl(...).
612 ChangeStatus
updateImpl(Attributor
&A
) override
{
614 clampCallSiteArgumentStates
<AAType
, StateType
>(A
, *this, S
);
615 // TODO: If we know we visited all incoming values, thus no are assumed
616 // dead, we can take the known information from the state T.
617 return clampStateAndIndicateChange
<StateType
>(this->getState(), S
);
621 /// Helper class for generic replication: function returned -> cs returned.
622 template <typename AAType
, typename Base
>
623 struct AACallSiteReturnedFromReturned
: public Base
{
624 AACallSiteReturnedFromReturned(const IRPosition
&IRP
) : Base(IRP
) {}
626 /// See AbstractAttribute::updateImpl(...).
627 ChangeStatus
updateImpl(Attributor
&A
) override
{
628 assert(this->getIRPosition().getPositionKind() ==
629 IRPosition::IRP_CALL_SITE_RETURNED
&&
630 "Can only wrap function returned positions for call site returned "
632 auto &S
= this->getState();
634 const Function
*AssociatedFunction
=
635 this->getIRPosition().getAssociatedFunction();
636 if (!AssociatedFunction
)
637 return S
.indicatePessimisticFixpoint();
639 IRPosition FnPos
= IRPosition::returned(*AssociatedFunction
);
640 const AAType
&AA
= A
.getAAFor
<AAType
>(*this, FnPos
);
641 return clampStateAndIndicateChange(
642 S
, static_cast<const typename
AAType::StateType
&>(AA
.getState()));
646 /// -----------------------NoUnwind Function Attribute--------------------------
648 struct AANoUnwindImpl
: AANoUnwind
{
649 AANoUnwindImpl(const IRPosition
&IRP
) : AANoUnwind(IRP
) {}
651 const std::string
getAsStr() const override
{
652 return getAssumed() ? "nounwind" : "may-unwind";
655 /// See AbstractAttribute::updateImpl(...).
656 ChangeStatus
updateImpl(Attributor
&A
) override
{
658 (unsigned)Instruction::Invoke
, (unsigned)Instruction::CallBr
,
659 (unsigned)Instruction::Call
, (unsigned)Instruction::CleanupRet
,
660 (unsigned)Instruction::CatchSwitch
, (unsigned)Instruction::Resume
};
662 auto CheckForNoUnwind
= [&](Instruction
&I
) {
666 if (ImmutableCallSite ICS
= ImmutableCallSite(&I
)) {
667 const auto &NoUnwindAA
=
668 A
.getAAFor
<AANoUnwind
>(*this, IRPosition::callsite_function(ICS
));
669 return NoUnwindAA
.isAssumedNoUnwind();
674 if (!A
.checkForAllInstructions(CheckForNoUnwind
, *this, Opcodes
))
675 return indicatePessimisticFixpoint();
677 return ChangeStatus::UNCHANGED
;
681 struct AANoUnwindFunction final
: public AANoUnwindImpl
{
682 AANoUnwindFunction(const IRPosition
&IRP
) : AANoUnwindImpl(IRP
) {}
684 /// See AbstractAttribute::trackStatistics()
685 void trackStatistics() const override
{ STATS_DECLTRACK_FN_ATTR(nounwind
) }
688 /// NoUnwind attribute deduction for a call sites.
689 struct AANoUnwindCallSite final
: AANoUnwindImpl
{
690 AANoUnwindCallSite(const IRPosition
&IRP
) : AANoUnwindImpl(IRP
) {}
692 /// See AbstractAttribute::initialize(...).
693 void initialize(Attributor
&A
) override
{
694 AANoUnwindImpl::initialize(A
);
695 Function
*F
= getAssociatedFunction();
697 indicatePessimisticFixpoint();
700 /// See AbstractAttribute::updateImpl(...).
701 ChangeStatus
updateImpl(Attributor
&A
) override
{
702 // TODO: Once we have call site specific value information we can provide
703 // call site specific liveness information and then it makes
704 // sense to specialize attributes for call sites arguments instead of
705 // redirecting requests to the callee argument.
706 Function
*F
= getAssociatedFunction();
707 const IRPosition
&FnPos
= IRPosition::function(*F
);
708 auto &FnAA
= A
.getAAFor
<AANoUnwind
>(*this, FnPos
);
709 return clampStateAndIndicateChange(
711 static_cast<const AANoUnwind::StateType
&>(FnAA
.getState()));
714 /// See AbstractAttribute::trackStatistics()
715 void trackStatistics() const override
{ STATS_DECLTRACK_CS_ATTR(nounwind
); }
718 /// --------------------- Function Return Values -------------------------------
720 /// "Attribute" that collects all potential returned values and the return
721 /// instructions that they arise from.
723 /// If there is a unique returned value R, the manifest method will:
724 /// - mark R with the "returned" attribute, if R is an argument.
725 class AAReturnedValuesImpl
: public AAReturnedValues
, public AbstractState
{
727 /// Mapping of values potentially returned by the associated function to the
728 /// return instructions that might return them.
729 MapVector
<Value
*, SmallSetVector
<ReturnInst
*, 4>> ReturnedValues
;
731 /// Mapping to remember the number of returned values for a call site such
732 /// that we can avoid updates if nothing changed.
733 DenseMap
<const CallBase
*, unsigned> NumReturnedValuesPerKnownAA
;
735 /// Set of unresolved calls returned by the associated function.
736 SmallSetVector
<CallBase
*, 4> UnresolvedCalls
;
741 bool IsFixed
= false;
742 bool IsValidState
= true;
746 AAReturnedValuesImpl(const IRPosition
&IRP
) : AAReturnedValues(IRP
) {}
748 /// See AbstractAttribute::initialize(...).
749 void initialize(Attributor
&A
) override
{
753 ReturnedValues
.clear();
755 Function
*F
= getAssociatedFunction();
757 indicatePessimisticFixpoint();
761 // The map from instruction opcodes to those instructions in the function.
762 auto &OpcodeInstMap
= A
.getInfoCache().getOpcodeInstMapForFunction(*F
);
764 // Look through all arguments, if one is marked as returned we are done.
765 for (Argument
&Arg
: F
->args()) {
766 if (Arg
.hasReturnedAttr()) {
767 auto &ReturnInstSet
= ReturnedValues
[&Arg
];
768 for (Instruction
*RI
: OpcodeInstMap
[Instruction::Ret
])
769 ReturnInstSet
.insert(cast
<ReturnInst
>(RI
));
771 indicateOptimisticFixpoint();
776 if (!F
->hasExactDefinition())
777 indicatePessimisticFixpoint();
780 /// See AbstractAttribute::manifest(...).
781 ChangeStatus
manifest(Attributor
&A
) override
;
783 /// See AbstractAttribute::getState(...).
784 AbstractState
&getState() override
{ return *this; }
786 /// See AbstractAttribute::getState(...).
787 const AbstractState
&getState() const override
{ return *this; }
789 /// See AbstractAttribute::updateImpl(Attributor &A).
790 ChangeStatus
updateImpl(Attributor
&A
) override
;
792 llvm::iterator_range
<iterator
> returned_values() override
{
793 return llvm::make_range(ReturnedValues
.begin(), ReturnedValues
.end());
796 llvm::iterator_range
<const_iterator
> returned_values() const override
{
797 return llvm::make_range(ReturnedValues
.begin(), ReturnedValues
.end());
800 const SmallSetVector
<CallBase
*, 4> &getUnresolvedCalls() const override
{
801 return UnresolvedCalls
;
804 /// Return the number of potential return values, -1 if unknown.
805 size_t getNumReturnValues() const override
{
806 return isValidState() ? ReturnedValues
.size() : -1;
809 /// Return an assumed unique return value if a single candidate is found. If
810 /// there cannot be one, return a nullptr. If it is not clear yet, return the
811 /// Optional::NoneType.
812 Optional
<Value
*> getAssumedUniqueReturnValue(Attributor
&A
) const;
814 /// See AbstractState::checkForAllReturnedValues(...).
815 bool checkForAllReturnedValuesAndReturnInsts(
816 const function_ref
<bool(Value
&, const SmallSetVector
<ReturnInst
*, 4> &)>
817 &Pred
) const override
;
819 /// Pretty print the attribute similar to the IR representation.
820 const std::string
getAsStr() const override
;
822 /// See AbstractState::isAtFixpoint().
823 bool isAtFixpoint() const override
{ return IsFixed
; }
825 /// See AbstractState::isValidState().
826 bool isValidState() const override
{ return IsValidState
; }
828 /// See AbstractState::indicateOptimisticFixpoint(...).
829 ChangeStatus
indicateOptimisticFixpoint() override
{
831 return ChangeStatus::UNCHANGED
;
834 ChangeStatus
indicatePessimisticFixpoint() override
{
836 IsValidState
= false;
837 return ChangeStatus::CHANGED
;
841 ChangeStatus
AAReturnedValuesImpl::manifest(Attributor
&A
) {
842 ChangeStatus Changed
= ChangeStatus::UNCHANGED
;
845 assert(isValidState());
846 STATS_DECLTRACK(KnownReturnValues
, FunctionReturn
,
847 "Number of function with known return values");
849 // Check if we have an assumed unique return value that we could manifest.
850 Optional
<Value
*> UniqueRV
= getAssumedUniqueReturnValue(A
);
852 if (!UniqueRV
.hasValue() || !UniqueRV
.getValue())
856 STATS_DECLTRACK(UniqueReturnValue
, FunctionReturn
,
857 "Number of function with unique return");
859 // Callback to replace the uses of CB with the constant C.
860 auto ReplaceCallSiteUsersWith
= [](CallBase
&CB
, Constant
&C
) {
861 if (CB
.getNumUses() == 0)
862 return ChangeStatus::UNCHANGED
;
863 CB
.replaceAllUsesWith(&C
);
864 return ChangeStatus::CHANGED
;
867 // If the assumed unique return value is an argument, annotate it.
868 if (auto *UniqueRVArg
= dyn_cast
<Argument
>(UniqueRV
.getValue())) {
869 getIRPosition() = IRPosition::argument(*UniqueRVArg
);
870 Changed
= IRAttribute::manifest(A
);
871 } else if (auto *RVC
= dyn_cast
<Constant
>(UniqueRV
.getValue())) {
872 // We can replace the returned value with the unique returned constant.
873 Value
&AnchorValue
= getAnchorValue();
874 if (Function
*F
= dyn_cast
<Function
>(&AnchorValue
)) {
875 for (const Use
&U
: F
->uses())
876 if (CallBase
*CB
= dyn_cast
<CallBase
>(U
.getUser()))
877 if (CB
->isCallee(&U
))
878 Changed
= ReplaceCallSiteUsersWith(*CB
, *RVC
) | Changed
;
880 assert(isa
<CallBase
>(AnchorValue
) &&
881 "Expcected a function or call base anchor!");
882 Changed
= ReplaceCallSiteUsersWith(cast
<CallBase
>(AnchorValue
), *RVC
);
884 if (Changed
== ChangeStatus::CHANGED
)
885 STATS_DECLTRACK(UniqueConstantReturnValue
, FunctionReturn
,
886 "Number of function returns replaced by constant return");
892 const std::string
AAReturnedValuesImpl::getAsStr() const {
893 return (isAtFixpoint() ? "returns(#" : "may-return(#") +
894 (isValidState() ? std::to_string(getNumReturnValues()) : "?") +
895 ")[#UC: " + std::to_string(UnresolvedCalls
.size()) + "]";
899 AAReturnedValuesImpl::getAssumedUniqueReturnValue(Attributor
&A
) const {
900 // If checkForAllReturnedValues provides a unique value, ignoring potential
901 // undef values that can also be present, it is assumed to be the actual
902 // return value and forwarded to the caller of this method. If there are
903 // multiple, a nullptr is returned indicating there cannot be a unique
905 Optional
<Value
*> UniqueRV
;
907 auto Pred
= [&](Value
&RV
) -> bool {
908 // If we found a second returned value and neither the current nor the saved
909 // one is an undef, there is no unique returned value. Undefs are special
910 // since we can pretend they have any value.
911 if (UniqueRV
.hasValue() && UniqueRV
!= &RV
&&
912 !(isa
<UndefValue
>(RV
) || isa
<UndefValue
>(UniqueRV
.getValue()))) {
917 // Do not overwrite a value with an undef.
918 if (!UniqueRV
.hasValue() || !isa
<UndefValue
>(RV
))
924 if (!A
.checkForAllReturnedValues(Pred
, *this))
930 bool AAReturnedValuesImpl::checkForAllReturnedValuesAndReturnInsts(
931 const function_ref
<bool(Value
&, const SmallSetVector
<ReturnInst
*, 4> &)>
936 // Check all returned values but ignore call sites as long as we have not
937 // encountered an overdefined one during an update.
938 for (auto &It
: ReturnedValues
) {
939 Value
*RV
= It
.first
;
941 CallBase
*CB
= dyn_cast
<CallBase
>(RV
);
942 if (CB
&& !UnresolvedCalls
.count(CB
))
945 if (!Pred(*RV
, It
.second
))
952 ChangeStatus
AAReturnedValuesImpl::updateImpl(Attributor
&A
) {
953 size_t NumUnresolvedCalls
= UnresolvedCalls
.size();
954 bool Changed
= false;
956 // State used in the value traversals starting in returned values.
958 // The map in which we collect return values -> return instrs.
959 decltype(ReturnedValues
) &RetValsMap
;
960 // The flag to indicate a change.
962 // The return instrs we come from.
963 SmallSetVector
<ReturnInst
*, 4> RetInsts
;
966 // Callback for a leaf value returned by the associated function.
967 auto VisitValueCB
= [](Value
&Val
, RVState
&RVS
, bool) -> bool {
968 auto Size
= RVS
.RetValsMap
[&Val
].size();
969 RVS
.RetValsMap
[&Val
].insert(RVS
.RetInsts
.begin(), RVS
.RetInsts
.end());
970 bool Inserted
= RVS
.RetValsMap
[&Val
].size() != Size
;
971 RVS
.Changed
|= Inserted
;
974 dbgs() << "[AAReturnedValues] 1 Add new returned value " << Val
975 << " => " << RVS
.RetInsts
.size() << "\n";
980 // Helper method to invoke the generic value traversal.
981 auto VisitReturnedValue
= [&](Value
&RV
, RVState
&RVS
) {
982 IRPosition RetValPos
= IRPosition::value(RV
);
983 return genericValueTraversal
<AAReturnedValues
, RVState
>(A
, RetValPos
, *this,
987 // Callback for all "return intructions" live in the associated function.
988 auto CheckReturnInst
= [this, &VisitReturnedValue
, &Changed
](Instruction
&I
) {
989 ReturnInst
&Ret
= cast
<ReturnInst
>(I
);
990 RVState
RVS({ReturnedValues
, Changed
, {}});
991 RVS
.RetInsts
.insert(&Ret
);
992 return VisitReturnedValue(*Ret
.getReturnValue(), RVS
);
995 // Start by discovering returned values from all live returned instructions in
996 // the associated function.
997 if (!A
.checkForAllInstructions(CheckReturnInst
, *this, {Instruction::Ret
}))
998 return indicatePessimisticFixpoint();
1000 // Once returned values "directly" present in the code are handled we try to
1001 // resolve returned calls.
1002 decltype(ReturnedValues
) NewRVsMap
;
1003 for (auto &It
: ReturnedValues
) {
1004 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Returned value: " << *It
.first
1005 << " by #" << It
.second
.size() << " RIs\n");
1006 CallBase
*CB
= dyn_cast
<CallBase
>(It
.first
);
1007 if (!CB
|| UnresolvedCalls
.count(CB
))
1010 if (!CB
->getCalledFunction()) {
1011 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1013 UnresolvedCalls
.insert(CB
);
1017 // TODO: use the function scope once we have call site AAReturnedValues.
1018 const auto &RetValAA
= A
.getAAFor
<AAReturnedValues
>(
1019 *this, IRPosition::function(*CB
->getCalledFunction()));
1020 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Found another AAReturnedValues: "
1021 << static_cast<const AbstractAttribute
&>(RetValAA
)
1024 // Skip dead ends, thus if we do not know anything about the returned
1025 // call we mark it as unresolved and it will stay that way.
1026 if (!RetValAA
.getState().isValidState()) {
1027 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Unresolved call: " << *CB
1029 UnresolvedCalls
.insert(CB
);
1033 // Do not try to learn partial information. If the callee has unresolved
1034 // return values we will treat the call as unresolved/opaque.
1035 auto &RetValAAUnresolvedCalls
= RetValAA
.getUnresolvedCalls();
1036 if (!RetValAAUnresolvedCalls
.empty()) {
1037 UnresolvedCalls
.insert(CB
);
1041 // Now check if we can track transitively returned values. If possible, thus
1042 // if all return value can be represented in the current scope, do so.
1043 bool Unresolved
= false;
1044 for (auto &RetValAAIt
: RetValAA
.returned_values()) {
1045 Value
*RetVal
= RetValAAIt
.first
;
1046 if (isa
<Argument
>(RetVal
) || isa
<CallBase
>(RetVal
) ||
1047 isa
<Constant
>(RetVal
))
1049 // Anything that did not fit in the above categories cannot be resolved,
1050 // mark the call as unresolved.
1051 LLVM_DEBUG(dbgs() << "[AAReturnedValues] transitively returned value "
1052 "cannot be translated: "
1053 << *RetVal
<< "\n");
1054 UnresolvedCalls
.insert(CB
);
1062 // Now track transitively returned values.
1063 unsigned &NumRetAA
= NumReturnedValuesPerKnownAA
[CB
];
1064 if (NumRetAA
== RetValAA
.getNumReturnValues()) {
1065 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Skip call as it has not "
1066 "changed since it was seen last\n");
1069 NumRetAA
= RetValAA
.getNumReturnValues();
1071 for (auto &RetValAAIt
: RetValAA
.returned_values()) {
1072 Value
*RetVal
= RetValAAIt
.first
;
1073 if (Argument
*Arg
= dyn_cast
<Argument
>(RetVal
)) {
1074 // Arguments are mapped to call site operands and we begin the traversal
1076 bool Unused
= false;
1077 RVState
RVS({NewRVsMap
, Unused
, RetValAAIt
.second
});
1078 VisitReturnedValue(*CB
->getArgOperand(Arg
->getArgNo()), RVS
);
1080 } else if (isa
<CallBase
>(RetVal
)) {
1081 // Call sites are resolved by the callee attribute over time, no need to
1082 // do anything for us.
1084 } else if (isa
<Constant
>(RetVal
)) {
1085 // Constants are valid everywhere, we can simply take them.
1086 NewRVsMap
[RetVal
].insert(It
.second
.begin(), It
.second
.end());
1092 // To avoid modifications to the ReturnedValues map while we iterate over it
1093 // we kept record of potential new entries in a copy map, NewRVsMap.
1094 for (auto &It
: NewRVsMap
) {
1095 assert(!It
.second
.empty() && "Entry does not add anything.");
1096 auto &ReturnInsts
= ReturnedValues
[It
.first
];
1097 for (ReturnInst
*RI
: It
.second
)
1098 if (ReturnInsts
.insert(RI
)) {
1099 LLVM_DEBUG(dbgs() << "[AAReturnedValues] Add new returned value "
1100 << *It
.first
<< " => " << *RI
<< "\n");
1105 Changed
|= (NumUnresolvedCalls
!= UnresolvedCalls
.size());
1106 return Changed
? ChangeStatus::CHANGED
: ChangeStatus::UNCHANGED
;
1109 struct AAReturnedValuesFunction final
: public AAReturnedValuesImpl
{
1110 AAReturnedValuesFunction(const IRPosition
&IRP
) : AAReturnedValuesImpl(IRP
) {}
1112 /// See AbstractAttribute::trackStatistics()
1113 void trackStatistics() const override
{ STATS_DECLTRACK_ARG_ATTR(returned
) }
1116 /// Returned values information for a call sites.
1117 struct AAReturnedValuesCallSite final
: AAReturnedValuesImpl
{
1118 AAReturnedValuesCallSite(const IRPosition
&IRP
) : AAReturnedValuesImpl(IRP
) {}
1120 /// See AbstractAttribute::initialize(...).
1121 void initialize(Attributor
&A
) override
{
1122 // TODO: Once we have call site specific value information we can provide
1123 // call site specific liveness information and then it makes
1124 // sense to specialize attributes for call sites instead of
1125 // redirecting requests to the callee.
1126 llvm_unreachable("Abstract attributes for returned values are not "
1127 "supported for call sites yet!");
1130 /// See AbstractAttribute::updateImpl(...).
1131 ChangeStatus
updateImpl(Attributor
&A
) override
{
1132 return indicatePessimisticFixpoint();
1135 /// See AbstractAttribute::trackStatistics()
1136 void trackStatistics() const override
{}
1139 /// ------------------------ NoSync Function Attribute -------------------------
1141 struct AANoSyncImpl
: AANoSync
{
1142 AANoSyncImpl(const IRPosition
&IRP
) : AANoSync(IRP
) {}
1144 const std::string
getAsStr() const override
{
1145 return getAssumed() ? "nosync" : "may-sync";
1148 /// See AbstractAttribute::updateImpl(...).
1149 ChangeStatus
updateImpl(Attributor
&A
) override
;
1151 /// Helper function used to determine whether an instruction is non-relaxed
1152 /// atomic. In other words, if an atomic instruction does not have unordered
1153 /// or monotonic ordering
1154 static bool isNonRelaxedAtomic(Instruction
*I
);
1156 /// Helper function used to determine whether an instruction is volatile.
1157 static bool isVolatile(Instruction
*I
);
1159 /// Helper function uset to check if intrinsic is volatile (memcpy, memmove,
1161 static bool isNoSyncIntrinsic(Instruction
*I
);
1164 bool AANoSyncImpl::isNonRelaxedAtomic(Instruction
*I
) {
1168 AtomicOrdering Ordering
;
1169 switch (I
->getOpcode()) {
1170 case Instruction::AtomicRMW
:
1171 Ordering
= cast
<AtomicRMWInst
>(I
)->getOrdering();
1173 case Instruction::Store
:
1174 Ordering
= cast
<StoreInst
>(I
)->getOrdering();
1176 case Instruction::Load
:
1177 Ordering
= cast
<LoadInst
>(I
)->getOrdering();
1179 case Instruction::Fence
: {
1180 auto *FI
= cast
<FenceInst
>(I
);
1181 if (FI
->getSyncScopeID() == SyncScope::SingleThread
)
1183 Ordering
= FI
->getOrdering();
1186 case Instruction::AtomicCmpXchg
: {
1187 AtomicOrdering Success
= cast
<AtomicCmpXchgInst
>(I
)->getSuccessOrdering();
1188 AtomicOrdering Failure
= cast
<AtomicCmpXchgInst
>(I
)->getFailureOrdering();
1189 // Only if both are relaxed, than it can be treated as relaxed.
1190 // Otherwise it is non-relaxed.
1191 if (Success
!= AtomicOrdering::Unordered
&&
1192 Success
!= AtomicOrdering::Monotonic
)
1194 if (Failure
!= AtomicOrdering::Unordered
&&
1195 Failure
!= AtomicOrdering::Monotonic
)
1201 "New atomic operations need to be known in the attributor.");
1205 if (Ordering
== AtomicOrdering::Unordered
||
1206 Ordering
== AtomicOrdering::Monotonic
)
1211 /// Checks if an intrinsic is nosync. Currently only checks mem* intrinsics.
1212 /// FIXME: We should ipmrove the handling of intrinsics.
1213 bool AANoSyncImpl::isNoSyncIntrinsic(Instruction
*I
) {
1214 if (auto *II
= dyn_cast
<IntrinsicInst
>(I
)) {
1215 switch (II
->getIntrinsicID()) {
1216 /// Element wise atomic memory intrinsics are can only be unordered,
1217 /// therefore nosync.
1218 case Intrinsic::memset_element_unordered_atomic
:
1219 case Intrinsic::memmove_element_unordered_atomic
:
1220 case Intrinsic::memcpy_element_unordered_atomic
:
1222 case Intrinsic::memset
:
1223 case Intrinsic::memmove
:
1224 case Intrinsic::memcpy
:
1225 if (!cast
<MemIntrinsic
>(II
)->isVolatile())
1235 bool AANoSyncImpl::isVolatile(Instruction
*I
) {
1236 assert(!ImmutableCallSite(I
) && !isa
<CallBase
>(I
) &&
1237 "Calls should not be checked here");
1239 switch (I
->getOpcode()) {
1240 case Instruction::AtomicRMW
:
1241 return cast
<AtomicRMWInst
>(I
)->isVolatile();
1242 case Instruction::Store
:
1243 return cast
<StoreInst
>(I
)->isVolatile();
1244 case Instruction::Load
:
1245 return cast
<LoadInst
>(I
)->isVolatile();
1246 case Instruction::AtomicCmpXchg
:
1247 return cast
<AtomicCmpXchgInst
>(I
)->isVolatile();
1253 ChangeStatus
AANoSyncImpl::updateImpl(Attributor
&A
) {
1255 auto CheckRWInstForNoSync
= [&](Instruction
&I
) {
1256 /// We are looking for volatile instructions or Non-Relaxed atomics.
1257 /// FIXME: We should ipmrove the handling of intrinsics.
1259 if (isa
<IntrinsicInst
>(&I
) && isNoSyncIntrinsic(&I
))
1262 if (ImmutableCallSite ICS
= ImmutableCallSite(&I
)) {
1263 if (ICS
.hasFnAttr(Attribute::NoSync
))
1266 const auto &NoSyncAA
=
1267 A
.getAAFor
<AANoSync
>(*this, IRPosition::callsite_function(ICS
));
1268 if (NoSyncAA
.isAssumedNoSync())
1273 if (!isVolatile(&I
) && !isNonRelaxedAtomic(&I
))
1279 auto CheckForNoSync
= [&](Instruction
&I
) {
1280 // At this point we handled all read/write effects and they are all
1281 // nosync, so they can be skipped.
1282 if (I
.mayReadOrWriteMemory())
1285 // non-convergent and readnone imply nosync.
1286 return !ImmutableCallSite(&I
).isConvergent();
1289 if (!A
.checkForAllReadWriteInstructions(CheckRWInstForNoSync
, *this) ||
1290 !A
.checkForAllCallLikeInstructions(CheckForNoSync
, *this))
1291 return indicatePessimisticFixpoint();
1293 return ChangeStatus::UNCHANGED
;
1296 struct AANoSyncFunction final
: public AANoSyncImpl
{
1297 AANoSyncFunction(const IRPosition
&IRP
) : AANoSyncImpl(IRP
) {}
1299 /// See AbstractAttribute::trackStatistics()
1300 void trackStatistics() const override
{ STATS_DECLTRACK_FN_ATTR(nosync
) }
1303 /// NoSync attribute deduction for a call sites.
1304 struct AANoSyncCallSite final
: AANoSyncImpl
{
1305 AANoSyncCallSite(const IRPosition
&IRP
) : AANoSyncImpl(IRP
) {}
1307 /// See AbstractAttribute::initialize(...).
1308 void initialize(Attributor
&A
) override
{
1309 AANoSyncImpl::initialize(A
);
1310 Function
*F
= getAssociatedFunction();
1312 indicatePessimisticFixpoint();
1315 /// See AbstractAttribute::updateImpl(...).
1316 ChangeStatus
updateImpl(Attributor
&A
) override
{
1317 // TODO: Once we have call site specific value information we can provide
1318 // call site specific liveness information and then it makes
1319 // sense to specialize attributes for call sites arguments instead of
1320 // redirecting requests to the callee argument.
1321 Function
*F
= getAssociatedFunction();
1322 const IRPosition
&FnPos
= IRPosition::function(*F
);
1323 auto &FnAA
= A
.getAAFor
<AANoSync
>(*this, FnPos
);
1324 return clampStateAndIndicateChange(
1325 getState(), static_cast<const AANoSync::StateType
&>(FnAA
.getState()));
1328 /// See AbstractAttribute::trackStatistics()
1329 void trackStatistics() const override
{ STATS_DECLTRACK_CS_ATTR(nosync
); }
1332 /// ------------------------ No-Free Attributes ----------------------------
1334 struct AANoFreeImpl
: public AANoFree
{
1335 AANoFreeImpl(const IRPosition
&IRP
) : AANoFree(IRP
) {}
1337 /// See AbstractAttribute::updateImpl(...).
1338 ChangeStatus
updateImpl(Attributor
&A
) override
{
1339 auto CheckForNoFree
= [&](Instruction
&I
) {
1340 ImmutableCallSite
ICS(&I
);
1341 if (ICS
.hasFnAttr(Attribute::NoFree
))
1344 const auto &NoFreeAA
=
1345 A
.getAAFor
<AANoFree
>(*this, IRPosition::callsite_function(ICS
));
1346 return NoFreeAA
.isAssumedNoFree();
1349 if (!A
.checkForAllCallLikeInstructions(CheckForNoFree
, *this))
1350 return indicatePessimisticFixpoint();
1351 return ChangeStatus::UNCHANGED
;
1354 /// See AbstractAttribute::getAsStr().
1355 const std::string
getAsStr() const override
{
1356 return getAssumed() ? "nofree" : "may-free";
1360 struct AANoFreeFunction final
: public AANoFreeImpl
{
1361 AANoFreeFunction(const IRPosition
&IRP
) : AANoFreeImpl(IRP
) {}
1363 /// See AbstractAttribute::trackStatistics()
1364 void trackStatistics() const override
{ STATS_DECLTRACK_FN_ATTR(nofree
) }
1367 /// NoFree attribute deduction for a call sites.
1368 struct AANoFreeCallSite final
: AANoFreeImpl
{
1369 AANoFreeCallSite(const IRPosition
&IRP
) : AANoFreeImpl(IRP
) {}
1371 /// See AbstractAttribute::initialize(...).
1372 void initialize(Attributor
&A
) override
{
1373 AANoFreeImpl::initialize(A
);
1374 Function
*F
= getAssociatedFunction();
1376 indicatePessimisticFixpoint();
1379 /// See AbstractAttribute::updateImpl(...).
1380 ChangeStatus
updateImpl(Attributor
&A
) override
{
1381 // TODO: Once we have call site specific value information we can provide
1382 // call site specific liveness information and then it makes
1383 // sense to specialize attributes for call sites arguments instead of
1384 // redirecting requests to the callee argument.
1385 Function
*F
= getAssociatedFunction();
1386 const IRPosition
&FnPos
= IRPosition::function(*F
);
1387 auto &FnAA
= A
.getAAFor
<AANoFree
>(*this, FnPos
);
1388 return clampStateAndIndicateChange(
1389 getState(), static_cast<const AANoFree::StateType
&>(FnAA
.getState()));
1392 /// See AbstractAttribute::trackStatistics()
1393 void trackStatistics() const override
{ STATS_DECLTRACK_CS_ATTR(nofree
); }
1396 /// ------------------------ NonNull Argument Attribute ------------------------
1397 struct AANonNullImpl
: AANonNull
{
1398 AANonNullImpl(const IRPosition
&IRP
) : AANonNull(IRP
) {}
1400 /// See AbstractAttribute::initialize(...).
1401 void initialize(Attributor
&A
) override
{
1402 if (hasAttr({Attribute::NonNull
, Attribute::Dereferenceable
}))
1403 indicateOptimisticFixpoint();
1405 AANonNull::initialize(A
);
1408 /// See AbstractAttribute::getAsStr().
1409 const std::string
getAsStr() const override
{
1410 return getAssumed() ? "nonnull" : "may-null";
1414 /// NonNull attribute for a floating value.
1415 struct AANonNullFloating
: AANonNullImpl
{
1416 AANonNullFloating(const IRPosition
&IRP
) : AANonNullImpl(IRP
) {}
1418 /// See AbstractAttribute::initialize(...).
1419 void initialize(Attributor
&A
) override
{
1420 AANonNullImpl::initialize(A
);
1425 const IRPosition
&IRP
= getIRPosition();
1426 const Value
&V
= IRP
.getAssociatedValue();
1427 const DataLayout
&DL
= A
.getDataLayout();
1429 // TODO: This context sensitive query should be removed once we can do
1430 // context sensitive queries in the genericValueTraversal below.
1431 if (isKnownNonZero(&V
, DL
, 0, /* TODO: AC */ nullptr, IRP
.getCtxI(),
1432 /* TODO: DT */ nullptr))
1433 indicateOptimisticFixpoint();
1436 /// See AbstractAttribute::updateImpl(...).
1437 ChangeStatus
updateImpl(Attributor
&A
) override
{
1438 const DataLayout
&DL
= A
.getDataLayout();
1440 auto VisitValueCB
= [&](Value
&V
, AAAlign::StateType
&T
,
1441 bool Stripped
) -> bool {
1442 const auto &AA
= A
.getAAFor
<AANonNull
>(*this, IRPosition::value(V
));
1443 if (!Stripped
&& this == &AA
) {
1444 if (!isKnownNonZero(&V
, DL
, 0, /* TODO: AC */ nullptr,
1445 /* TODO: CtxI */ nullptr,
1446 /* TODO: DT */ nullptr))
1447 T
.indicatePessimisticFixpoint();
1449 // Use abstract attribute information.
1450 const AANonNull::StateType
&NS
=
1451 static_cast<const AANonNull::StateType
&>(AA
.getState());
1454 return T
.isValidState();
1458 if (!genericValueTraversal
<AANonNull
, StateType
>(A
, getIRPosition(), *this,
1460 return indicatePessimisticFixpoint();
1462 return clampStateAndIndicateChange(getState(), T
);
1465 /// See AbstractAttribute::trackStatistics()
1466 void trackStatistics() const override
{ STATS_DECLTRACK_FNRET_ATTR(nonnull
) }
1469 /// NonNull attribute for function return value.
1470 struct AANonNullReturned final
1471 : AAReturnedFromReturnedValues
<AANonNull
, AANonNullImpl
> {
1472 AANonNullReturned(const IRPosition
&IRP
)
1473 : AAReturnedFromReturnedValues
<AANonNull
, AANonNullImpl
>(IRP
) {}
1475 /// See AbstractAttribute::trackStatistics()
1476 void trackStatistics() const override
{ STATS_DECLTRACK_FNRET_ATTR(nonnull
) }
1479 /// NonNull attribute for function argument.
1480 struct AANonNullArgument final
1481 : AAArgumentFromCallSiteArguments
<AANonNull
, AANonNullImpl
> {
1482 AANonNullArgument(const IRPosition
&IRP
)
1483 : AAArgumentFromCallSiteArguments
<AANonNull
, AANonNullImpl
>(IRP
) {}
1485 /// See AbstractAttribute::trackStatistics()
1486 void trackStatistics() const override
{ STATS_DECLTRACK_ARG_ATTR(nonnull
) }
1489 struct AANonNullCallSiteArgument final
: AANonNullFloating
{
1490 AANonNullCallSiteArgument(const IRPosition
&IRP
) : AANonNullFloating(IRP
) {}
1492 /// See AbstractAttribute::trackStatistics()
1493 void trackStatistics() const override
{ STATS_DECLTRACK_CSARG_ATTR(nonnull
) }
1496 /// NonNull attribute for a call site return position.
1497 struct AANonNullCallSiteReturned final
1498 : AACallSiteReturnedFromReturned
<AANonNull
, AANonNullImpl
> {
1499 AANonNullCallSiteReturned(const IRPosition
&IRP
)
1500 : AACallSiteReturnedFromReturned
<AANonNull
, AANonNullImpl
>(IRP
) {}
1502 /// See AbstractAttribute::trackStatistics()
1503 void trackStatistics() const override
{ STATS_DECLTRACK_CSRET_ATTR(nonnull
) }
1506 /// ------------------------ No-Recurse Attributes ----------------------------
1508 struct AANoRecurseImpl
: public AANoRecurse
{
1509 AANoRecurseImpl(const IRPosition
&IRP
) : AANoRecurse(IRP
) {}
1511 /// See AbstractAttribute::getAsStr()
1512 const std::string
getAsStr() const override
{
1513 return getAssumed() ? "norecurse" : "may-recurse";
1517 struct AANoRecurseFunction final
: AANoRecurseImpl
{
1518 AANoRecurseFunction(const IRPosition
&IRP
) : AANoRecurseImpl(IRP
) {}
1520 /// See AbstractAttribute::updateImpl(...).
1521 ChangeStatus
updateImpl(Attributor
&A
) override
{
1522 // TODO: Implement this.
1523 return indicatePessimisticFixpoint();
1526 void trackStatistics() const override
{ STATS_DECLTRACK_FN_ATTR(norecurse
) }
1529 /// NoRecurse attribute deduction for a call sites.
1530 struct AANoRecurseCallSite final
: AANoRecurseImpl
{
1531 AANoRecurseCallSite(const IRPosition
&IRP
) : AANoRecurseImpl(IRP
) {}
1533 /// See AbstractAttribute::initialize(...).
1534 void initialize(Attributor
&A
) override
{
1535 AANoRecurseImpl::initialize(A
);
1536 Function
*F
= getAssociatedFunction();
1538 indicatePessimisticFixpoint();
1541 /// See AbstractAttribute::updateImpl(...).
1542 ChangeStatus
updateImpl(Attributor
&A
) override
{
1543 // TODO: Once we have call site specific value information we can provide
1544 // call site specific liveness information and then it makes
1545 // sense to specialize attributes for call sites arguments instead of
1546 // redirecting requests to the callee argument.
1547 Function
*F
= getAssociatedFunction();
1548 const IRPosition
&FnPos
= IRPosition::function(*F
);
1549 auto &FnAA
= A
.getAAFor
<AANoRecurse
>(*this, FnPos
);
1550 return clampStateAndIndicateChange(
1552 static_cast<const AANoRecurse::StateType
&>(FnAA
.getState()));
1555 /// See AbstractAttribute::trackStatistics()
1556 void trackStatistics() const override
{ STATS_DECLTRACK_CS_ATTR(norecurse
); }
1559 /// ------------------------ Will-Return Attributes ----------------------------
1561 // Helper function that checks whether a function has any cycle.
1562 // TODO: Replace with more efficent code
1563 static bool containsCycle(Function
&F
) {
1564 SmallPtrSet
<BasicBlock
*, 32> Visited
;
1566 // Traverse BB by dfs and check whether successor is already visited.
1567 for (BasicBlock
*BB
: depth_first(&F
)) {
1569 for (auto *SuccBB
: successors(BB
)) {
1570 if (Visited
.count(SuccBB
))
1577 // Helper function that checks the function have a loop which might become an
1579 // FIXME: Any cycle is regarded as endless loop for now.
1580 // We have to allow some patterns.
1581 static bool containsPossiblyEndlessLoop(Function
*F
) {
1582 return !F
|| !F
->hasExactDefinition() || containsCycle(*F
);
1585 struct AAWillReturnImpl
: public AAWillReturn
{
1586 AAWillReturnImpl(const IRPosition
&IRP
) : AAWillReturn(IRP
) {}
1588 /// See AbstractAttribute::initialize(...).
1589 void initialize(Attributor
&A
) override
{
1590 AAWillReturn::initialize(A
);
1592 Function
*F
= getAssociatedFunction();
1593 if (containsPossiblyEndlessLoop(F
))
1594 indicatePessimisticFixpoint();
1597 /// See AbstractAttribute::updateImpl(...).
1598 ChangeStatus
updateImpl(Attributor
&A
) override
{
1599 auto CheckForWillReturn
= [&](Instruction
&I
) {
1600 IRPosition IPos
= IRPosition::callsite_function(ImmutableCallSite(&I
));
1601 const auto &WillReturnAA
= A
.getAAFor
<AAWillReturn
>(*this, IPos
);
1602 if (WillReturnAA
.isKnownWillReturn())
1604 if (!WillReturnAA
.isAssumedWillReturn())
1606 const auto &NoRecurseAA
= A
.getAAFor
<AANoRecurse
>(*this, IPos
);
1607 return NoRecurseAA
.isAssumedNoRecurse();
1610 if (!A
.checkForAllCallLikeInstructions(CheckForWillReturn
, *this))
1611 return indicatePessimisticFixpoint();
1613 return ChangeStatus::UNCHANGED
;
1616 /// See AbstractAttribute::getAsStr()
1617 const std::string
getAsStr() const override
{
1618 return getAssumed() ? "willreturn" : "may-noreturn";
1622 struct AAWillReturnFunction final
: AAWillReturnImpl
{
1623 AAWillReturnFunction(const IRPosition
&IRP
) : AAWillReturnImpl(IRP
) {}
1625 /// See AbstractAttribute::trackStatistics()
1626 void trackStatistics() const override
{ STATS_DECLTRACK_FN_ATTR(willreturn
) }

/// WillReturn attribute deduction for a call site.
struct AAWillReturnCallSite final : AAWillReturnImpl {
  AAWillReturnCallSite(const IRPosition &IRP) : AAWillReturnImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAWillReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AAWillReturn>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AAWillReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(willreturn); }
};

/// ------------------------ NoAlias Argument Attribute ------------------------

struct AANoAliasImpl : AANoAlias {
  AANoAliasImpl(const IRPosition &IRP) : AANoAlias(IRP) {}

  const std::string getAsStr() const override {
    return getAssumed() ? "noalias" : "may-alias";
  }
};

/// NoAlias attribute for a floating value.
struct AANoAliasFloating final : AANoAliasImpl {
  AANoAliasFloating(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: It isn't sound to initialize the same way as `AANoAliasImpl`
    // because `noalias` may not be valid in the current position.
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(noalias)
  }
};

/// NoAlias attribute for an argument.
struct AANoAliasArgument final
    : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl> {
  AANoAliasArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AANoAlias, AANoAliasImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(noalias) }
};

struct AANoAliasCallSiteArgument final : AANoAliasImpl {
  AANoAliasCallSiteArgument(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // See callsite argument attribute and callee argument attribute.
    ImmutableCallSite ICS(&getAnchorValue());
    if (ICS.paramHasAttr(getArgNo(), Attribute::NoAlias))
      indicateOptimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Implement this.
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(noalias) }
};

/// NoAlias attribute for function return value.
struct AANoAliasReturned final : AANoAliasImpl {
  AANoAliasReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  virtual ChangeStatus updateImpl(Attributor &A) override {

    auto CheckReturnValue = [&](Value &RV) -> bool {
      if (Constant *C = dyn_cast<Constant>(&RV))
        if (C->isNullValue() || isa<UndefValue>(C))
          return true;

      /// For now, we can only deduce noalias if we have call sites.
      /// FIXME: add more support.
      ImmutableCallSite ICS(&RV);
      if (!ICS)
        return false;

      const IRPosition &RVPos = IRPosition::value(RV);
      const auto &NoAliasAA = A.getAAFor<AANoAlias>(*this, RVPos);
      if (!NoAliasAA.isAssumedNoAlias())
        return false;

      const auto &NoCaptureAA = A.getAAFor<AANoCapture>(*this, RVPos);
      return NoCaptureAA.isAssumedNoCaptureMaybeReturned();
    };

    if (!A.checkForAllReturnedValues(CheckReturnValue, *this))
      return indicatePessimisticFixpoint();

    return ChangeStatus::UNCHANGED;
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(noalias) }
};
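
// Illustrative example: a wrapper that returns either null or the result of a
// call that is itself assumed noalias (and not captured elsewhere) can have
// its returned position marked noalias, e.g.,
//
//   define i8* @wrap(i64 %n) {
//     %m = call noalias i8* @malloc(i64 %n)
//     ret i8* %m
//   }
//
// CheckReturnValue accepts %m through the AANoAlias and AANoCapture queries,
// so @wrap manifests a `noalias` return.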

/// NoAlias attribute deduction for a call site return value.
struct AANoAliasCallSiteReturned final : AANoAliasImpl {
  AANoAliasCallSiteReturned(const IRPosition &IRP) : AANoAliasImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoAliasImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AANoAlias>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AANoAlias::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSRET_ATTR(noalias); }
};

/// -------------------AAIsDead Function Attribute-----------------------

struct AAIsDeadImpl : public AAIsDead {
  AAIsDeadImpl(const IRPosition &IRP) : AAIsDead(IRP) {}

  void initialize(Attributor &A) override {
    const Function *F = getAssociatedFunction();
    if (F && !F->isDeclaration())
      exploreFromEntry(A, F);
  }

  void exploreFromEntry(Attributor &A, const Function *F) {
    ToBeExploredPaths.insert(&(F->getEntryBlock().front()));
    assumeLive(A, F->getEntryBlock());

    for (size_t i = 0; i < ToBeExploredPaths.size(); ++i)
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[i]))
        NoReturnCalls.insert(NextNoReturnI);
  }

  /// Find the next assumed noreturn instruction in the block of \p I starting
  /// from, thus including, \p I.
  ///
  /// The caller is responsible to monitor the ToBeExploredPaths set as new
  /// instructions discovered in other basic blocks will be placed in there.
  ///
  /// \returns The next assumed noreturn instruction in the block of \p I
  ///          starting from, thus including, \p I.
  const Instruction *findNextNoReturn(Attributor &A, const Instruction *I);

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return "Live[#BB " + std::to_string(AssumedLiveBlocks.size()) + "/" +
           std::to_string(getAssociatedFunction()->size()) + "][#NRI " +
           std::to_string(NoReturnCalls.size()) + "]";
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    assert(getState().isValidState() &&
           "Attempted to manifest an invalid state!");

    ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
    Function &F = *getAssociatedFunction();

    if (AssumedLiveBlocks.empty()) {
      A.deleteAfterManifest(F);
      return ChangeStatus::CHANGED;
    }

    // Flag to determine if we can change an invoke to a call assuming the
    // callee is nounwind. This is not possible if the personality of the
    // function allows to catch asynchronous exceptions.
    bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

    for (const Instruction *NRC : NoReturnCalls) {
      Instruction *I = const_cast<Instruction *>(NRC);
      BasicBlock *BB = I->getParent();
      Instruction *SplitPos = I->getNextNode();
      // TODO: mark stuff before unreachable instructions as dead.
      if (isa_and_nonnull<UnreachableInst>(SplitPos))
        continue;

      if (auto *II = dyn_cast<InvokeInst>(I)) {
        // If we keep the invoke the split position is at the beginning of the
        // normal destination block (it invokes a noreturn function after all).
        BasicBlock *NormalDestBB = II->getNormalDest();
        SplitPos = &NormalDestBB->front();

        /// Invoke is replaced with a call and unreachable is placed after it if
        /// the callee is nounwind and noreturn. Otherwise, we keep the invoke
        /// and only place an unreachable in the normal successor.
        if (Invoke2CallAllowed) {
          if (II->getCalledFunction()) {
            const IRPosition &IPos = IRPosition::callsite_function(*II);
            const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
            if (AANoUnw.isAssumedNoUnwind()) {
              LLVM_DEBUG(dbgs()
                         << "[AAIsDead] Replace invoke with call inst\n");
              // We do not need an invoke (II) but instead want a call followed
              // by an unreachable. However, we do not remove II as other
              // abstract attributes might have it cached as part of their
              // results. Given that we modify the CFG anyway, we simply keep II
              // around but in a new dead block. To avoid II being live through
              // a different edge we have to ensure the block we place it in is
              // only reached from the current block of II and then not reached
              // at all when we insert the unreachable.
              SplitBlockPredecessors(NormalDestBB, {BB}, ".i2c");
              CallInst *CI = createCallMatchingInvoke(II);
              CI->insertBefore(II);
              CI->takeName(II);
              II->replaceAllUsesWith(CI);
              SplitPos = CI->getNextNode();
            }
          }
        }

        if (SplitPos == &NormalDestBB->front()) {
          // If this is an invoke of a noreturn function the edge to the normal
          // destination block is dead but not necessarily the block itself.
          // TODO: We need to move to an edge based system during deduction and
          //       also manifest.
          assert(!NormalDestBB->isLandingPad() &&
                 "Expected the normal destination not to be a landingpad!");
          BasicBlock *SplitBB =
              SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
          // The split block is live even if it contains only an unreachable
          // instruction at the end.
          assumeLive(A, *SplitBB);
          SplitPos = SplitBB->getTerminator();
        }
      }

      BB = SplitPos->getParent();
      SplitBlock(BB, SplitPos);
      changeToUnreachable(BB->getTerminator(), /* UseLLVMTrap */ false);
      HasChanged = ChangeStatus::CHANGED;
    }

    for (BasicBlock &BB : F)
      if (!AssumedLiveBlocks.count(&BB))
        A.deleteAfterManifest(BB);

    return HasChanged;
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// See AAIsDead::isAssumedDead(BasicBlock *).
  bool isAssumedDead(const BasicBlock *BB) const override {
    assert(BB->getParent() == getAssociatedFunction() &&
           "BB must be in the same anchor scope function.");

    if (!getAssumed())
      return false;
    return !AssumedLiveBlocks.count(BB);
  }

  /// See AAIsDead::isKnownDead(BasicBlock *).
  bool isKnownDead(const BasicBlock *BB) const override {
    return getKnown() && isAssumedDead(BB);
  }

  /// See AAIsDead::isAssumedDead(Instruction *I).
  bool isAssumedDead(const Instruction *I) const override {
    assert(I->getParent()->getParent() == getAssociatedFunction() &&
           "Instruction must be in the same anchor scope function.");

    if (!getAssumed())
      return false;

    // If it is not in AssumedLiveBlocks then it is for sure dead.
    // Otherwise, it can still be after a noreturn call in a live block.
    if (!AssumedLiveBlocks.count(I->getParent()))
      return true;

    // If it is not after a noreturn call, then it is live.
    return isAfterNoReturn(I);
  }

  /// See AAIsDead::isKnownDead(Instruction *I).
  bool isKnownDead(const Instruction *I) const override {
    return getKnown() && isAssumedDead(I);
  }

  /// Check if the instruction is after a noreturn call, in other words,
  /// assumed dead.
  bool isAfterNoReturn(const Instruction *I) const;

  /// Determine if \p F might catch asynchronous exceptions.
  static bool mayCatchAsynchronousExceptions(const Function &F) {
    return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F);
  }

  /// Assume \p BB is (partially) live now and indicate to the Attributor \p A
  /// that internal functions called from \p BB should now be looked at.
  void assumeLive(Attributor &A, const BasicBlock &BB) {
    if (!AssumedLiveBlocks.insert(&BB).second)
      return;

    // We assume that all of BB is (probably) live now and if there are calls
    // to internal functions we will assume that those are now live as well.
    // This is a performance optimization for blocks with calls to a lot of
    // internal functions. It can however cause dead functions to be treated
    // as live.
    for (const Instruction &I : BB)
      if (ImmutableCallSite ICS = ImmutableCallSite(&I))
        if (const Function *F = ICS.getCalledFunction())
          if (F->hasInternalLinkage())
            A.markLiveInternalFunction(*F);
  }

  /// Collection of to be explored paths.
  SmallSetVector<const Instruction *, 8> ToBeExploredPaths;

  /// Collection of all assumed live BasicBlocks.
  DenseSet<const BasicBlock *> AssumedLiveBlocks;

  /// Collection of calls with noreturn attribute, assumed or known.
  SmallSetVector<const Instruction *, 4> NoReturnCalls;
};

struct AAIsDeadFunction final : public AAIsDeadImpl {
  AAIsDeadFunction(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECL(PartiallyDeadBlocks, Function,
               "Number of basic blocks classified as partially dead");
    BUILD_STAT_NAME(PartiallyDeadBlocks, Function) += NoReturnCalls.size();
  }
};

bool AAIsDeadImpl::isAfterNoReturn(const Instruction *I) const {
  const Instruction *PrevI = I->getPrevNode();
  while (PrevI) {
    if (NoReturnCalls.count(PrevI))
      return true;
    PrevI = PrevI->getPrevNode();
  }
  return false;
}

const Instruction *AAIsDeadImpl::findNextNoReturn(Attributor &A,
                                                  const Instruction *I) {
  const BasicBlock *BB = I->getParent();
  const Function &F = *BB->getParent();

  // Flag to determine if we can change an invoke to a call assuming the callee
  // is nounwind. This is not possible if the personality of the function
  // allows to catch asynchronous exceptions.
  bool Invoke2CallAllowed = !mayCatchAsynchronousExceptions(F);

  // TODO: We should have a function that determines if an "edge" is dead.
  //       Edges could be from an instruction to the next or from a terminator
  //       to the successor. For now, we need to special case the unwind block
  //       of InvokeInst below.

  while (I) {
    ImmutableCallSite ICS(I);

    if (ICS) {
      const IRPosition &IPos = IRPosition::callsite_function(ICS);
      // Regardless of the no-return property of an invoke instruction we only
      // learn that the regular successor is not reachable through this
      // instruction but the unwind block might still be.
      if (auto *Invoke = dyn_cast<InvokeInst>(I)) {
        // Use nounwind to justify the unwind block is dead as well.
        const auto &AANoUnw = A.getAAFor<AANoUnwind>(*this, IPos);
        if (!Invoke2CallAllowed || !AANoUnw.isAssumedNoUnwind()) {
          assumeLive(A, *Invoke->getUnwindDest());
          ToBeExploredPaths.insert(&Invoke->getUnwindDest()->front());
        }
      }

      const auto &NoReturnAA = A.getAAFor<AANoReturn>(*this, IPos);
      if (NoReturnAA.isAssumedNoReturn())
        return I;
    }

    I = I->getNextNode();
  }

  // get new paths (reachable blocks).
  for (const BasicBlock *SuccBB : successors(BB)) {
    assumeLive(A, *SuccBB);
    ToBeExploredPaths.insert(&SuccBB->front());
  }

  // No noreturn instruction found.
  return nullptr;
}
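
// Illustrative example: once a call is assumed noreturn, everything after it
// in the block is unreachable and successors are not enqueued, e.g.,
//
//   %v = call i32 @compute()
//   call void @abort()          ; assumed noreturn => returned by this scan
//   store i32 %v, i32* @g       ; never explored, treated as dead
//
// Only if the scan falls off the end of the block are the successor blocks
// assumed live and added to ToBeExploredPaths.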

ChangeStatus AAIsDeadImpl::updateImpl(Attributor &A) {
  ChangeStatus Status = ChangeStatus::UNCHANGED;

  // Temporary collection to iterate over existing noreturn instructions. This
  // will allow easier modification of the NoReturnCalls collection.
  SmallVector<const Instruction *, 8> NoReturnChanged;

  for (const Instruction *I : NoReturnCalls)
    NoReturnChanged.push_back(I);

  for (const Instruction *I : NoReturnChanged) {
    size_t Size = ToBeExploredPaths.size();

    const Instruction *NextNoReturnI = findNextNoReturn(A, I);
    if (NextNoReturnI != I) {
      Status = ChangeStatus::CHANGED;
      NoReturnCalls.remove(I);
      if (NextNoReturnI)
        NoReturnCalls.insert(NextNoReturnI);
    }

    // Explore new paths.
    while (Size != ToBeExploredPaths.size()) {
      Status = ChangeStatus::CHANGED;
      if (const Instruction *NextNoReturnI =
              findNextNoReturn(A, ToBeExploredPaths[Size++]))
        NoReturnCalls.insert(NextNoReturnI);
    }
  }

  LLVM_DEBUG(dbgs() << "[AAIsDead] AssumedLiveBlocks: "
                    << AssumedLiveBlocks.size() << " Total number of blocks: "
                    << getAssociatedFunction()->size() << "\n");

  // If we know everything is live there is no need to query for liveness.
  if (NoReturnCalls.empty() &&
      getAssociatedFunction()->size() == AssumedLiveBlocks.size()) {
    // Indicating a pessimistic fixpoint will cause the state to be "invalid"
    // which will cause the Attributor to not return the AAIsDead on request,
    // which will prevent us from querying isAssumedDead().
    indicatePessimisticFixpoint();
    assert(!isValidState() && "Expected an invalid state!");
    Status = ChangeStatus::CHANGED;
  }

  return Status;
}

/// Liveness information for call sites.
struct AAIsDeadCallSite final : AAIsDeadImpl {
  AAIsDeadCallSite(const IRPosition &IRP) : AAIsDeadImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites instead of
    //       redirecting requests to the callee.
    llvm_unreachable("Abstract attributes for liveness are not "
                     "supported for call sites yet!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    return indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// -------------------- Dereferenceable Argument Attribute --------------------

template <>
ChangeStatus clampStateAndIndicateChange<DerefState>(DerefState &S,
                                                     const DerefState &R) {
  ChangeStatus CS0 = clampStateAndIndicateChange<IntegerState>(
      S.DerefBytesState, R.DerefBytesState);
  ChangeStatus CS1 =
      clampStateAndIndicateChange<IntegerState>(S.GlobalState, R.GlobalState);
  return CS0 | CS1;
}

struct AADereferenceableImpl : AADereferenceable {
  AADereferenceableImpl(const IRPosition &IRP) : AADereferenceable(IRP) {}
  using StateType = DerefState;

  void initialize(Attributor &A) override {
    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Dereferenceable, Attribute::DereferenceableOrNull},
             Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownDerefBytesMaximum(Attr.getValueAsInt());

    NonNullAA = &A.getAAFor<AANonNull>(*this, getIRPosition());

    const IRPosition &IRP = this->getIRPosition();
    bool IsFnInterface = IRP.isFnInterfaceKind();
    const Function *FnScope = IRP.getAnchorScope();
    if (IsFnInterface && (!FnScope || !FnScope->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::getState()
  /// {
  StateType &getState() override { return *this; }
  const StateType &getState() const override { return *this; }
  /// }

  void getDeducedAttributes(LLVMContext &Ctx,
                            SmallVectorImpl<Attribute> &Attrs) const override {
    // TODO: Add *_globally support
    if (isAssumedNonNull())
      Attrs.emplace_back(Attribute::getWithDereferenceableBytes(
          Ctx, getAssumedDereferenceableBytes()));
    else
      Attrs.emplace_back(Attribute::getWithDereferenceableOrNullBytes(
          Ctx, getAssumedDereferenceableBytes()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    if (!getAssumedDereferenceableBytes())
      return "unknown-dereferenceable";
    return std::string("dereferenceable") +
           (isAssumedNonNull() ? "" : "_or_null") +
           (isAssumedGlobal() ? "_globally" : "") + "<" +
           std::to_string(getKnownDereferenceableBytes()) + "-" +
           std::to_string(getAssumedDereferenceableBytes()) + ">";
  }
};
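
// Illustrative reading of the getAsStr() output above: for a pointer with 4
// known and 8 assumed dereferenceable bytes that may still be null, the state
// prints as "dereferenceable_or_null<4-8>"; once non-null is assumed the
// "_or_null" suffix is dropped.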

/// Dereferenceable attribute for a floating value.
struct AADereferenceableFloating : AADereferenceableImpl {
  AADereferenceableFloating(const IRPosition &IRP)
      : AADereferenceableImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, DerefState &T, bool Stripped) -> bool {
      unsigned IdxWidth =
          DL.getIndexSizeInBits(V.getType()->getPointerAddressSpace());
      APInt Offset(IdxWidth, 0);
      const Value *Base =
          V.stripAndAccumulateInBoundsConstantOffsets(DL, Offset);

      const auto &AA =
          A.getAAFor<AADereferenceable>(*this, IRPosition::value(*Base));
      int64_t DerefBytes = 0;
      if (!Stripped && this == &AA) {
        // Use IR information if we did not strip anything.
        // TODO: track globally.
        bool CanBeNull;
        DerefBytes = Base->getPointerDereferenceableBytes(DL, CanBeNull);
        T.GlobalState.indicatePessimisticFixpoint();
      } else {
        const DerefState &DS = static_cast<const DerefState &>(AA.getState());
        DerefBytes = DS.DerefBytesState.getAssumed();
        T.GlobalState &= DS.GlobalState;
      }

      // For now we do not try to "increase" dereferenceability due to negative
      // indices as we first have to come up with code to deal with loops and
      // for overflows of the dereferenceable bytes.
      int64_t OffsetSExt = Offset.getSExtValue();
      if (OffsetSExt < 0)
        OffsetSExt = 0;

      T.takeAssumedDerefBytesMinimum(
          std::max(int64_t(0), DerefBytes - OffsetSExt));

      if (this == &AA) {
        if (!Stripped) {
          // If nothing was stripped IR information is all we got.
          T.takeKnownDerefBytesMaximum(
              std::max(int64_t(0), DerefBytes - OffsetSExt));
          T.indicatePessimisticFixpoint();
        } else if (OffsetSExt > 0) {
          // If something was stripped but there is circular reasoning we look
          // for the offset. If it is positive we basically decrease the
          // dereferenceable bytes in a circular loop now, which will simply
          // drive them down to the known value in a very slow way which we
          // can accelerate.
          T.indicatePessimisticFixpoint();
        }
      }

      return T.isValidState();
    };

    DerefState T;
    if (!genericValueTraversal<AADereferenceable, DerefState>(
            A, getIRPosition(), *this, T, VisitValueCB))
      return indicatePessimisticFixpoint();

    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a return value.
struct AADereferenceableReturned final
    : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                   DerefState> {
  AADereferenceableReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AADereferenceable, AADereferenceableImpl,
                                     DerefState>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FNRET_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for an argument
struct AADereferenceableArgument final
    : AAArgumentFromCallSiteArguments<AADereferenceable, AADereferenceableImpl,
                                      DerefState> {
  AADereferenceableArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AADereferenceable,
                                        AADereferenceableImpl, DerefState>(
            IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_ARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute for a call site argument.
struct AADereferenceableCallSiteArgument final : AADereferenceableFloating {
  AADereferenceableCallSiteArgument(const IRPosition &IRP)
      : AADereferenceableFloating(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSARG_ATTR(dereferenceable)
  }
};

/// Dereferenceable attribute deduction for a call site return value.
struct AADereferenceableCallSiteReturned final : AADereferenceableImpl {
  AADereferenceableCallSiteReturned(const IRPosition &IRP)
      : AADereferenceableImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AADereferenceableImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AADereferenceable>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const DerefState &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CS_ATTR(dereferenceable);
  }
};

// ------------------------ Align Argument Attribute ------------------------

struct AAAlignImpl : AAAlign {
  AAAlignImpl(const IRPosition &IRP) : AAAlign(IRP) {}

  // Max alignment value allowed in IR
  static const unsigned MAX_ALIGN = 1U << 29;

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    takeAssumedMinimum(MAX_ALIGN);

    SmallVector<Attribute, 4> Attrs;
    getAttrs({Attribute::Alignment}, Attrs);
    for (const Attribute &Attr : Attrs)
      takeKnownMaximum(Attr.getValueAsInt());

    if (getIRPosition().isFnInterfaceKind() &&
        (!getAssociatedFunction() ||
         !getAssociatedFunction()->hasExactDefinition()))
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    ChangeStatus Changed = ChangeStatus::UNCHANGED;

    // Check for users that allow alignment annotations.
    Value &AnchorVal = getIRPosition().getAnchorValue();
    for (const Use &U : AnchorVal.uses()) {
      if (auto *SI = dyn_cast<StoreInst>(U.getUser())) {
        if (SI->getPointerOperand() == &AnchorVal)
          if (SI->getAlignment() < getAssumedAlign()) {
            STATS_DECLTRACK(AAAlign, Store,
                            "Number of times alignment added to a store");
            SI->setAlignment(getAssumedAlign());
            Changed = ChangeStatus::CHANGED;
          }
      } else if (auto *LI = dyn_cast<LoadInst>(U.getUser())) {
        if (LI->getPointerOperand() == &AnchorVal)
          if (LI->getAlignment() < getAssumedAlign()) {
            LI->setAlignment(getAssumedAlign());
            STATS_DECLTRACK(AAAlign, Load,
                            "Number of times alignment added to a load");
            Changed = ChangeStatus::CHANGED;
          }
      }
    }

    return AAAlign::manifest(A) | Changed;
  }

  // TODO: Provide a helper to determine the implied ABI alignment and check in
  //       the existing manifest method and a new one for AAAlignImpl that value
  //       to avoid making the alignment explicit if it did not improve.

  /// See AbstractAttribute::getDeducedAttributes
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (getAssumedAlign() > 1)
      Attrs.emplace_back(Attribute::getWithAlignment(Ctx, getAssumedAlign()));
  }

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumedAlign() ? ("align<" + std::to_string(getKnownAlign()) +
                                "-" + std::to_string(getAssumedAlign()) + ">")
                             : "unknown-align";
  }
};

/// Align attribute for a floating value.
struct AAAlignFloating : AAAlignImpl {
  AAAlignFloating(const IRPosition &IRP) : AAAlignImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    const DataLayout &DL = A.getDataLayout();

    auto VisitValueCB = [&](Value &V, AAAlign::StateType &T,
                            bool Stripped) -> bool {
      const auto &AA = A.getAAFor<AAAlign>(*this, IRPosition::value(V));
      if (!Stripped && this == &AA) {
        // Use only IR information if we did not strip anything.
        T.takeKnownMaximum(V.getPointerAlignment(DL));
        T.indicatePessimisticFixpoint();
      } else {
        // Use abstract attribute information.
        const AAAlign::StateType &DS =
            static_cast<const AAAlign::StateType &>(AA.getState());
        T ^= DS;
      }
      return T.isValidState();
    };

    StateType T;
    if (!genericValueTraversal<AAAlign, StateType>(A, getIRPosition(), *this, T,
                                                   VisitValueCB))
      return indicatePessimisticFixpoint();

    // TODO: If we know we visited all incoming values, thus none are assumed
    // dead, we can take the known information from the state T.
    return clampStateAndIndicateChange(getState(), T);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FLOATING_ATTR(align) }
};

/// Align attribute for function return value.
struct AAAlignReturned final
    : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl> {
  AAAlignReturned(const IRPosition &IRP)
      : AAReturnedFromReturnedValues<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FNRET_ATTR(aligned) }
};

/// Align attribute for function argument.
struct AAAlignArgument final
    : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl> {
  AAAlignArgument(const IRPosition &IRP)
      : AAArgumentFromCallSiteArguments<AAAlign, AAAlignImpl>(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(aligned) }
};

struct AAAlignCallSiteArgument final : AAAlignFloating {
  AAAlignCallSiteArgument(const IRPosition &IRP) : AAAlignFloating(IRP) {}

  /// See AbstractAttribute::manifest(...).
  ChangeStatus manifest(Attributor &A) override {
    return AAAlignImpl::manifest(A);
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(aligned) }
};

/// Align attribute deduction for a call site return value.
struct AAAlignCallSiteReturned final : AAAlignImpl {
  AAAlignCallSiteReturned(const IRPosition &IRP) : AAAlignImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AAAlignImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::returned(*F);
    auto &FnAA = A.getAAFor<AAAlign>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(), static_cast<const AAAlign::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(align); }
};

/// ------------------ Function No-Return Attribute ----------------------------
struct AANoReturnImpl : public AANoReturn {
  AANoReturnImpl(const IRPosition &IRP) : AANoReturn(IRP) {}

  /// See AbstractAttribute::getAsStr().
  const std::string getAsStr() const override {
    return getAssumed() ? "noreturn" : "may-return";
  }

  /// See AbstractAttribute::updateImpl(Attributor &A).
  virtual ChangeStatus updateImpl(Attributor &A) override {
    auto CheckForNoReturn = [](Instruction &) { return false; };
    if (!A.checkForAllInstructions(CheckForNoReturn, *this,
                                   {(unsigned)Instruction::Ret}))
      return indicatePessimisticFixpoint();
    return ChangeStatus::UNCHANGED;
  }
};
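
// Illustrative example: the update rejects `noreturn` as soon as any `ret` is
// seen, since CheckForNoReturn returns false for every ret instruction. A
// function with no reachable ret keeps the assumed noreturn state, e.g.,
//
//   define void @loop_forever() {    ; no ret => stays assumed noreturn
//     br label %self
//   self:
//     br label %self
//   }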

struct AANoReturnFunction final : AANoReturnImpl {
  AANoReturnFunction(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_FN_ATTR(noreturn) }
};

/// NoReturn attribute deduction for a call site.
struct AANoReturnCallSite final : AANoReturnImpl {
  AANoReturnCallSite(const IRPosition &IRP) : AANoReturnImpl(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoReturnImpl::initialize(A);
    Function *F = getAssociatedFunction();
    if (!F)
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Function *F = getAssociatedFunction();
    const IRPosition &FnPos = IRPosition::function(*F);
    auto &FnAA = A.getAAFor<AANoReturn>(*this, FnPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoReturn::StateType &>(FnAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CS_ATTR(noreturn); }
};

/// ----------------------- Variable Capturing ---------------------------------

/// A class to hold the state for no-capture attributes.
struct AANoCaptureImpl : public AANoCapture {
  AANoCaptureImpl(const IRPosition &IRP) : AANoCapture(IRP) {}

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    AANoCapture::initialize(A);

    const IRPosition &IRP = getIRPosition();
    const Function *F =
        getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();

    // Check what state the associated function can actually capture.
    if (F)
      determineFunctionCaptureCapabilities(*F, *this);
    else
      indicatePessimisticFixpoint();
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override;

  /// see AbstractAttribute::isAssumedNoCaptureMaybeReturned(...).
  virtual void
  getDeducedAttributes(LLVMContext &Ctx,
                       SmallVectorImpl<Attribute> &Attrs) const override {
    if (!isAssumedNoCaptureMaybeReturned())
      return;

    if (isAssumedNoCapture())
      Attrs.emplace_back(Attribute::get(Ctx, Attribute::NoCapture));
    else if (ManifestInternal)
      Attrs.emplace_back(Attribute::get(Ctx, "no-capture-maybe-returned"));
  }

  /// Set the NOT_CAPTURED_IN_MEM and NOT_CAPTURED_IN_RET bits in \p Known
  /// depending on the ability of the function associated with \p IRP to capture
  /// state in memory and through "returning/throwing", respectively.
  static void determineFunctionCaptureCapabilities(const Function &F,
                                                   IntegerState &State) {
    // TODO: Once we have memory behavior attributes we should use them here.

    // If we know we cannot communicate or write to memory, we do not care
    // about ptr2int anymore.
    if (F.onlyReadsMemory() && F.doesNotThrow() &&
        F.getReturnType()->isVoidTy()) {
      State.addKnownBits(NO_CAPTURE);
      return;
    }

    // A function cannot capture state in memory if it only reads memory, it can
    // however return/throw state and the state might be influenced by the
    // pointer value, e.g., loading from a returned pointer might reveal a bit.
    if (F.onlyReadsMemory())
      State.addKnownBits(NOT_CAPTURED_IN_MEM);

    // A function cannot communicate state back if it does not throw
    // exceptions and does not return values.
    if (F.doesNotThrow() && F.getReturnType()->isVoidTy())
      State.addKnownBits(NOT_CAPTURED_IN_RET);
  }

  /// See AbstractState::getAsStr().
  const std::string getAsStr() const override {
    if (isKnownNoCapture())
      return "known not-captured";
    if (isAssumedNoCapture())
      return "assumed not-captured";
    if (isKnownNoCaptureMaybeReturned())
      return "known not-captured-maybe-returned";
    if (isAssumedNoCaptureMaybeReturned())
      return "assumed not-captured-maybe-returned";
    return "assumed-captured";
  }
};
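
// Illustrative example for determineFunctionCaptureCapabilities(...): given a
// declaration such as
//
//   declare void @observe(i8* %p) readonly nounwind
//
// the function can neither write %p to memory, nor throw, nor return a value,
// so pointer arguments of @observe start with NO_CAPTURE known. A readonly
// function returning i8* would instead only get NOT_CAPTURED_IN_MEM.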

/// Attributor-aware capture tracker.
struct AACaptureUseTracker final : public CaptureTracker {

  /// Create a capture tracker that can lookup in-flight abstract attributes
  /// through the Attributor \p A.
  ///
  /// If a use leads to a potential capture, \p CapturedInMemory is set and the
  /// search is stopped. If a use leads to a return instruction,
  /// \p CommunicatedBack is set to true and \p CapturedInMemory is not changed.
  /// If a use leads to a ptr2int which may capture the value,
  /// \p CapturedInInteger is set. If a use is found that is currently assumed
  /// "no-capture-maybe-returned", the user is added to the \p PotentialCopies
  /// set. All values in \p PotentialCopies are later tracked as well. For every
  /// explored use we decrement \p RemainingUsesToExplore. Once it reaches 0,
  /// the search is stopped with \p CapturedInMemory and \p CapturedInInteger
  /// conservatively set to true.
  AACaptureUseTracker(Attributor &A, AANoCapture &NoCaptureAA,
                      const AAIsDead &IsDeadAA, IntegerState &State,
                      SmallVectorImpl<const Value *> &PotentialCopies,
                      unsigned &RemainingUsesToExplore)
      : A(A), NoCaptureAA(NoCaptureAA), IsDeadAA(IsDeadAA), State(State),
        PotentialCopies(PotentialCopies),
        RemainingUsesToExplore(RemainingUsesToExplore) {}

  /// Determine if \p V maybe captured. *Also updates the state!*
  bool valueMayBeCaptured(const Value *V) {
    if (V->getType()->isPointerTy()) {
      PointerMayBeCaptured(V, this);
    } else {
      State.indicatePessimisticFixpoint();
    }
    return State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

  /// See CaptureTracker::tooManyUses().
  void tooManyUses() override {
    State.removeAssumedBits(AANoCapture::NO_CAPTURE);
  }

  bool isDereferenceableOrNull(Value *O, const DataLayout &DL) override {
    if (CaptureTracker::isDereferenceableOrNull(O, DL))
      return true;
    const auto &DerefAA =
        A.getAAFor<AADereferenceable>(NoCaptureAA, IRPosition::value(*O));
    return DerefAA.getAssumedDereferenceableBytes();
  }

  /// See CaptureTracker::captured(...).
  bool captured(const Use *U) override {
    Instruction *UInst = cast<Instruction>(U->getUser());
    LLVM_DEBUG(dbgs() << "Check use: " << *U->get() << " in " << *UInst
                      << "\n");

    // Because we may reuse the tracker multiple times we keep track of the
    // number of explored uses ourselves as well.
    if (RemainingUsesToExplore-- == 0) {
      LLVM_DEBUG(dbgs() << " - too many uses to explore!\n");
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);
    }

    // Deal with ptr2int by following uses.
    if (isa<PtrToIntInst>(UInst)) {
      LLVM_DEBUG(dbgs() << " - ptr2int assume the worst!\n");
      return valueMayBeCaptured(UInst);
    }

    // Explicitly catch return instructions.
    if (isa<ReturnInst>(UInst))
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ true);

    // For now we only use special logic for call sites. However, the tracker
    // itself knows about a lot of other non-capturing cases already.
    CallSite CS(UInst);
    if (!CS || !CS.isArgOperand(U))
      return isCapturedIn(/* Memory */ true, /* Integer */ true,
                          /* Return */ true);

    unsigned ArgNo = CS.getArgumentNo(U);
    const IRPosition &CSArgPos = IRPosition::callsite_argument(CS, ArgNo);
    // If we have a abstract no-capture attribute for the argument we can use
    // it to justify a non-capture attribute here. This allows recursion!
    auto &ArgNoCaptureAA = A.getAAFor<AANoCapture>(NoCaptureAA, CSArgPos);
    if (ArgNoCaptureAA.isAssumedNoCapture())
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    if (ArgNoCaptureAA.isAssumedNoCaptureMaybeReturned()) {
      addPotentialCopy(CS);
      return isCapturedIn(/* Memory */ false, /* Integer */ false,
                          /* Return */ false);
    }

    // Lastly, we could not find a reason no-capture can be assumed so we don't.
    return isCapturedIn(/* Memory */ true, /* Integer */ true,
                        /* Return */ true);
  }

  /// Register \p CS as potential copy of the value we are checking.
  void addPotentialCopy(CallSite CS) {
    PotentialCopies.push_back(CS.getInstruction());
  }

  /// See CaptureTracker::shouldExplore(...).
  bool shouldExplore(const Use *U) override {
    // Check liveness.
    return !IsDeadAA.isAssumedDead(cast<Instruction>(U->getUser()));
  }

  /// Update the state according to \p CapturedInMem, \p CapturedInInt, and
  /// \p CapturedInRet, then return the appropriate value for use in the
  /// CaptureTracker::captured() interface.
  bool isCapturedIn(bool CapturedInMem, bool CapturedInInt,
                    bool CapturedInRet) {
    LLVM_DEBUG(dbgs() << " - captures [Mem " << CapturedInMem << "|Int "
                      << CapturedInInt << "|Ret " << CapturedInRet << "]\n");
    if (CapturedInMem)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_MEM);
    if (CapturedInInt)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_INT);
    if (CapturedInRet)
      State.removeAssumedBits(AANoCapture::NOT_CAPTURED_IN_RET);
    return !State.isAssumed(AANoCapture::NO_CAPTURE_MAYBE_RETURNED);
  }

private:
  /// The attributor providing in-flight abstract attributes.
  Attributor &A;

  /// The abstract attribute currently updated.
  AANoCapture &NoCaptureAA;

  /// The abstract liveness state.
  const AAIsDead &IsDeadAA;

  /// The state currently updated.
  IntegerState &State;

  /// Set of potential copies of the tracked value.
  SmallVectorImpl<const Value *> &PotentialCopies;

  /// Global counter to limit the number of explored uses.
  unsigned &RemainingUsesToExplore;
};
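
// Illustrative uses as classified by the tracker above, for a pointer %p:
//
//   store i8* %p, i8** @g                 ; captured in memory
//   %i = ptrtoint i8* %p to i64           ; followed; may capture in integer
//   ret i8* %p                            ; communicated back, maybe returned
//   call void @f(i8* nocapture %p)        ; not captured (argument position)
//
// The last case may also be justified through the in-flight AANoCapture of
// the callee argument rather than relying on the IR attribute alone.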

ChangeStatus AANoCaptureImpl::updateImpl(Attributor &A) {
  const IRPosition &IRP = getIRPosition();
  const Value *V =
      getArgNo() >= 0 ? IRP.getAssociatedArgument() : &IRP.getAssociatedValue();
  if (!V)
    return indicatePessimisticFixpoint();

  const Function *F =
      getArgNo() >= 0 ? IRP.getAssociatedFunction() : IRP.getAnchorScope();
  assert(F && "Expected a function!");
  const auto &IsDeadAA = A.getAAFor<AAIsDead>(*this, IRPosition::function(*F));

  AANoCapture::StateType T;
  // TODO: Once we have memory behavior attributes we should use them here
  //       similar to the reasoning in
  //       AANoCaptureImpl::determineFunctionCaptureCapabilities(...).

  // TODO: Use the AAReturnedValues to learn if the argument can return or
  //       not.

  // Use the CaptureTracker interface and logic with the specialized tracker,
  // defined in AACaptureUseTracker, that can look at in-flight abstract
  // attributes and directly updates the assumed state.
  SmallVector<const Value *, 4> PotentialCopies;
  unsigned RemainingUsesToExplore = DefaultMaxUsesToExplore;
  AACaptureUseTracker Tracker(A, *this, IsDeadAA, T, PotentialCopies,
                              RemainingUsesToExplore);

  // Check all potential copies of the associated value until we can assume
  // none will be captured or we have to assume at least one might be.
  unsigned Idx = 0;
  PotentialCopies.push_back(V);
  while (T.isAssumed(NO_CAPTURE_MAYBE_RETURNED) && Idx < PotentialCopies.size())
    Tracker.valueMayBeCaptured(PotentialCopies[Idx++]);

  AANoCapture::StateType &S = getState();
  auto Assumed = S.getAssumed();
  S.intersectAssumedBits(T.getAssumed());
  return Assumed == S.getAssumed() ? ChangeStatus::UNCHANGED
                                   : ChangeStatus::CHANGED;
}

/// NoCapture attribute for function arguments.
struct AANoCaptureArgument final : AANoCaptureImpl {
  AANoCaptureArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_ARG_ATTR(nocapture) }
};

/// NoCapture attribute for call site arguments.
struct AANoCaptureCallSiteArgument final : AANoCaptureImpl {
  AANoCaptureCallSiteArgument(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    // TODO: Once we have call site specific value information we can provide
    //       call site specific liveness information and then it makes
    //       sense to specialize attributes for call sites arguments instead of
    //       redirecting requests to the callee argument.
    Argument *Arg = getAssociatedArgument();
    if (!Arg)
      return indicatePessimisticFixpoint();
    const IRPosition &ArgPos = IRPosition::argument(*Arg);
    auto &ArgAA = A.getAAFor<AANoCapture>(*this, ArgPos);
    return clampStateAndIndicateChange(
        getState(),
        static_cast<const AANoCapture::StateType &>(ArgAA.getState()));
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override { STATS_DECLTRACK_CSARG_ATTR(nocapture) };
};

/// NoCapture attribute for floating values.
struct AANoCaptureFloating final : AANoCaptureImpl {
  AANoCaptureFloating(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_FLOATING_ATTR(nocapture)
  }
};

/// NoCapture attribute for function return value.
struct AANoCaptureReturned final : AANoCaptureImpl {
  AANoCaptureReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::initialize(...).
  void initialize(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::updateImpl(...).
  ChangeStatus updateImpl(Attributor &A) override {
    llvm_unreachable("NoCapture is not applicable to function returns!");
  }

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {}
};

/// NoCapture attribute deduction for a call site return value.
struct AANoCaptureCallSiteReturned final : AANoCaptureImpl {
  AANoCaptureCallSiteReturned(const IRPosition &IRP) : AANoCaptureImpl(IRP) {}

  /// See AbstractAttribute::trackStatistics()
  void trackStatistics() const override {
    STATS_DECLTRACK_CSRET_ATTR(nocapture)
  }
};

/// ----------------------------------------------------------------------------
///                               Attributor
/// ----------------------------------------------------------------------------

bool Attributor::isAssumedDead(const AbstractAttribute &AA,
                               const AAIsDead *LivenessAA) {
  const Instruction *CtxI = AA.getIRPosition().getCtxI();
  if (!CtxI)
    return false;

  if (!LivenessAA)
    LivenessAA =
        &getAAFor<AAIsDead>(AA, IRPosition::function(*CtxI->getFunction()),
                            /* TrackDependence */ false);

  // Don't check liveness for AAIsDead.
  if (&AA == LivenessAA)
    return false;

  if (!LivenessAA->isAssumedDead(CtxI))
    return false;

  // We actually used liveness information so we have to record a dependence.
  recordDependence(*LivenessAA, AA);

  return true;
}

bool Attributor::checkForAllCallSites(const function_ref<bool(CallSite)> &Pred,
                                      const AbstractAttribute &QueryingAA,
                                      bool RequireAllCallSites) {
  // We can try to determine information from the call sites. However, this is
  // only possible if all call sites are known, hence the function has internal
  // linkage.
  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  if (RequireAllCallSites && !AssociatedFunction->hasInternalLinkage()) {
    LLVM_DEBUG(
        dbgs()
        << "[Attributor] Function " << AssociatedFunction->getName()
        << " has no internal linkage, hence not all call sites are known\n");
    return false;
  }

  for (const Use &U : AssociatedFunction->uses()) {
    Instruction *I = dyn_cast<Instruction>(U.getUser());
    // TODO: Deal with abstract call sites here.
    if (!I)
      return false;

    Function *Caller = I->getFunction();

    const auto &LivenessAA = getAAFor<AAIsDead>(
        QueryingAA, IRPosition::function(*Caller), /* TrackDependence */ false);

    // Skip dead calls.
    if (LivenessAA.isAssumedDead(I)) {
      // We actually used liveness information so we have to record a
      // dependence.
      recordDependence(LivenessAA, QueryingAA);
      continue;
    }

    CallSite CS(U.getUser());
    if (!CS || !CS.isCallee(&U)) {
      if (!RequireAllCallSites)
        continue;

      LLVM_DEBUG(dbgs() << "[Attributor] User " << *U.getUser()
                        << " is an invalid use of "
                        << AssociatedFunction->getName() << "\n");
      return false;
    }

    if (Pred(CS))
      continue;

    LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
                      << *CS.getInstruction() << "\n");
    return false;
  }

  return true;
}
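
// Illustrative consequence of the RequireAllCallSites logic above: for
//
//   define internal i32 @helper(i32 %x) { ... }
//
// every use is visible in the module, so call-site-based deduction (e.g.,
// argument attributes derived via AAArgumentFromCallSiteArguments) may fire,
// while an externally visible function bails out before the use walk.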

bool Attributor::checkForAllReturnedValuesAndReturnInsts(
    const function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)>
        &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide return instructions we have to have an exact
  // definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // If this is a call site query we use the call site specific return values
  // and liveness information.
  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
}

bool Attributor::checkForAllReturnedValues(
    const function_ref<bool(Value &)> &Pred,
    const AbstractAttribute &QueryingAA) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &AARetVal = getAAFor<AAReturnedValues>(QueryingAA, QueryIRP);
  if (!AARetVal.getState().isValidState())
    return false;

  return AARetVal.checkForAllReturnedValuesAndReturnInsts(
      [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
        return Pred(RV);
      });
}

bool Attributor::checkForAllInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    const AbstractAttribute &QueryingAA, const ArrayRef<unsigned> &Opcodes) {

  const IRPosition &IRP = QueryingAA.getIRPosition();
  // Since we need to provide instructions we have to have an exact definition.
  const Function *AssociatedFunction = IRP.getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  auto &OpcodeInstMap =
      InfoCache.getOpcodeInstMapForFunction(*AssociatedFunction);
  for (unsigned Opcode : Opcodes) {
    for (Instruction *I : OpcodeInstMap[Opcode]) {
      // Skip dead instructions.
      if (LivenessAA.isAssumedDead(I)) {
        AnyDead = true;
        continue;
      }

      if (!Pred(*I))
        return false;
    }
  }

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}

bool Attributor::checkForAllReadWriteInstructions(
    const llvm::function_ref<bool(Instruction &)> &Pred,
    AbstractAttribute &QueryingAA) {

  const Function *AssociatedFunction =
      QueryingAA.getIRPosition().getAssociatedFunction();
  if (!AssociatedFunction)
    return false;

  // TODO: use the function scope once we have call site AAReturnedValues.
  const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
  const auto &LivenessAA =
      getAAFor<AAIsDead>(QueryingAA, QueryIRP, /* TrackDependence */ false);
  bool AnyDead = false;

  for (Instruction *I :
       InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
    // Skip dead instructions.
    if (LivenessAA.isAssumedDead(I)) {
      AnyDead = true;
      continue;
    }

    if (!Pred(*I))
      return false;
  }

  // If we actually used liveness information we have to record a dependence.
  if (AnyDead)
    recordDependence(LivenessAA, QueryingAA);

  return true;
}

ChangeStatus Attributor::run(Module &M) {
  LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
                    << AllAbstractAttributes.size()
                    << " abstract attributes.\n");

  // Now that all abstract attributes are collected and initialized we start
  // the abstract analysis.

  unsigned IterationCounter = 1;

  SmallVector<AbstractAttribute *, 64> ChangedAAs;
  SetVector<AbstractAttribute *> Worklist;
  Worklist.insert(AllAbstractAttributes.begin(), AllAbstractAttributes.end());

  bool RecomputeDependences = false;

  do {
    // Remember the size to determine new attributes.
    size_t NumAAs = AllAbstractAttributes.size();
    LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist size: " << Worklist.size() << "\n");

    // If dependences (=QueryMap) are recomputed we have to look at all
    // abstract attributes again, regardless of what changed in the last
    // iteration.
    if (RecomputeDependences) {
      LLVM_DEBUG(
          dbgs() << "[Attributor] Run all AAs to recompute dependences\n");
      QueryMap.clear();
      ChangedAAs.clear();
      Worklist.insert(AllAbstractAttributes.begin(),
                      AllAbstractAttributes.end());
    }

    // Add all abstract attributes that are potentially dependent on one that
    // changed to the work list.
    for (AbstractAttribute *ChangedAA : ChangedAAs) {
      auto &QuerriedAAs = QueryMap[ChangedAA];
      Worklist.insert(QuerriedAAs.begin(), QuerriedAAs.end());
    }

    LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
                      << ", Worklist+Dependent size: " << Worklist.size()
                      << "\n");

    // Reset the changed set.
    ChangedAAs.clear();

    // Update all abstract attributes in the work list and record the ones
    // that changed.
    for (AbstractAttribute *AA : Worklist)
      if (!isAssumedDead(*AA, nullptr))
        if (AA->update(*this) == ChangeStatus::CHANGED)
          ChangedAAs.push_back(AA);

    // Check if we recompute the dependences in the next iteration.
    RecomputeDependences = (DepRecomputeInterval > 0 &&
                            IterationCounter % DepRecomputeInterval == 0);

    // Add attributes to the changed set if they have been created in the last
    // iteration.
    ChangedAAs.append(AllAbstractAttributes.begin() + NumAAs,
                      AllAbstractAttributes.end());

    // Reset the work list and repopulate with the changed abstract
    // attributes. Note that dependent ones are added above.
    Worklist.clear();
    Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());

  } while (!Worklist.empty() && (IterationCounter++ < MaxFixpointIterations ||
                                 VerifyMaxFixpointIterations));

  LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
                    << IterationCounter << "/" << MaxFixpointIterations
                    << " iterations\n");
3182 size_t NumFinalAAs
= AllAbstractAttributes
.size();
3184 bool FinishedAtFixpoint
= Worklist
.empty();
3186 // Reset abstract arguments not settled in a sound fixpoint by now. This
3187 // happens when we stopped the fixpoint iteration early. Note that only the
3188 // ones marked as "changed" *and* the ones transitively depending on them
3189 // need to be reverted to a pessimistic state. Others might not be in a
3190 // fixpoint state but we can use the optimistic results for them anyway.
3191 SmallPtrSet
<AbstractAttribute
*, 32> Visited
;
3192 for (unsigned u
= 0; u
< ChangedAAs
.size(); u
++) {
3193 AbstractAttribute
*ChangedAA
= ChangedAAs
[u
];
3194 if (!Visited
.insert(ChangedAA
).second
)
3197 AbstractState
&State
= ChangedAA
->getState();
3198 if (!State
.isAtFixpoint()) {
3199 State
.indicatePessimisticFixpoint();
3201 NumAttributesTimedOut
++;
3204 auto &QuerriedAAs
= QueryMap
[ChangedAA
];
3205 ChangedAAs
.append(QuerriedAAs
.begin(), QuerriedAAs
.end());
3209 if (!Visited
.empty())
3210 dbgs() << "\n[Attributor] Finalized " << Visited
.size()
3211 << " abstract attributes.\n";
3214 unsigned NumManifested
= 0;
3215 unsigned NumAtFixpoint
= 0;
3216 ChangeStatus ManifestChange
= ChangeStatus::UNCHANGED
;
3217 for (AbstractAttribute
*AA
: AllAbstractAttributes
) {
3218 AbstractState
&State
= AA
->getState();
3220 // If there is not already a fixpoint reached, we can now take the
3221 // optimistic state. This is correct because we enforced a pessimistic one
3222 // on abstract attributes that were transitively dependent on a changed one
3224 if (!State
.isAtFixpoint())
3225 State
.indicateOptimisticFixpoint();
3227 // If the state is invalid, we do not try to manifest it.
3228 if (!State
.isValidState())
3232 if (isAssumedDead(*AA
, nullptr))
3234 // Manifest the state and record if we changed the IR.
3235 ChangeStatus LocalChange
= AA
->manifest(*this);
3236 if (LocalChange
== ChangeStatus::CHANGED
&& AreStatisticsEnabled())
3237 AA
->trackStatistics();
3239 ManifestChange
= ManifestChange
| LocalChange
;
3242 NumManifested
+= (LocalChange
== ChangeStatus::CHANGED
);
3245 (void)NumManifested
;
3246 (void)NumAtFixpoint
;
3247 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
3248 << " arguments while " << NumAtFixpoint
3249 << " were in a valid fixpoint state\n");
  // If verification is requested, we finished this run at a fixpoint, and the
  // IR was changed, we re-run the whole fixpoint analysis, starting at
  // re-initialization of the arguments. This re-run should not result in an
  // IR change. Though, the (virtual) state of attributes at the end of the
  // re-run might be more optimistic than the known state or the IR state if
  // the better state cannot be manifested.
  if (VerifyAttributor && FinishedAtFixpoint &&
      ManifestChange == ChangeStatus::CHANGED) {
    VerifyAttributor = false;
    ChangeStatus VerifyStatus = run(M);
    if (VerifyStatus != ChangeStatus::UNCHANGED)
      llvm_unreachable(
          "Attributor verification failed, re-run did result in an IR change "
          "even after a fixpoint was reached in the original run. (False "
          "positives possible!)");
    VerifyAttributor = true;
  }

  NumAttributesManifested += NumManifested;
  NumAttributesValidFixpoint += NumAtFixpoint;
  (void)NumFinalAAs;
  assert(
      NumFinalAAs == AllAbstractAttributes.size() &&
      "Expected the final number of abstract attributes to remain unchanged!");
  // Delete stuff at the end to avoid invalid references and a nice order.
  {
    LLVM_DEBUG(dbgs() << "\n[Attributor] Delete at least "
                      << ToBeDeletedFunctions.size() << " functions and "
                      << ToBeDeletedBlocks.size() << " blocks and "
                      << ToBeDeletedInsts.size() << " instructions\n");
    for (Instruction *I : ToBeDeletedInsts) {
      // Replace remaining uses with undef before erasing so no dangling
      // references are left behind.
      if (!I->use_empty())
        I->replaceAllUsesWith(UndefValue::get(I->getType()));
      I->eraseFromParent();
    }

    if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
      SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
      ToBeDeletedBBs.reserve(NumDeadBlocks);
      ToBeDeletedBBs.append(ToBeDeletedBlocks.begin(), ToBeDeletedBlocks.end());
      DeleteDeadBlocks(ToBeDeletedBBs);
      STATS_DECLTRACK(AAIsDead, BasicBlock,
                      "Number of dead basic blocks deleted.");
    }

    STATS_DECL(AAIsDead, Function, "Number of dead functions deleted.");
    for (Function *Fn : ToBeDeletedFunctions) {
      Fn->replaceAllUsesWith(UndefValue::get(Fn->getType()));
      Fn->eraseFromParent();
      STATS_TRACK(AAIsDead, Function);
    }
    // Identify dead internal functions and delete them. This happens outside
    // the other fixpoint analysis as we might treat potentially dead functions
    // as live to lower the number of iterations. If they happen to be dead,
    // the below fixpoint loop will identify and eliminate them.
    SmallVector<Function *, 8> InternalFns;
    for (Function &F : M)
      if (F.hasInternalLinkage())
        InternalFns.push_back(&F);

    bool FoundDeadFn = true;
    while (FoundDeadFn) {
      FoundDeadFn = false;
      for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
        Function *F = InternalFns[u];
        if (!F)
          continue;

        // The predicate always returns false, so checkForAllCallSites can
        // only succeed if all uses are known and none is a live call site,
        // i.e., the function is dead.
        const auto *LivenessAA =
            lookupAAFor<AAIsDead>(IRPosition::function(*F));
        if (LivenessAA &&
            !checkForAllCallSites([](CallSite CS) { return false; },
                                  *LivenessAA, true))
          continue;

        STATS_TRACK(AAIsDead, Function);
        F->replaceAllUsesWith(UndefValue::get(F->getType()));
        F->eraseFromParent();
        InternalFns[u] = nullptr;
        FoundDeadFn = true;
      }
    }
  }
  if (VerifyMaxFixpointIterations &&
      IterationCounter != MaxFixpointIterations) {
    errs() << "\n[Attributor] Fixpoint iteration done after: "
           << IterationCounter << "/" << MaxFixpointIterations
           << " iterations\n";
    llvm_unreachable("The fixpoint was not reached with exactly the number of "
                     "specified iterations!");
  }

  return ManifestChange;
}
void Attributor::identifyDefaultAbstractAttributes(Function &F) {
  if (!VisitedFunctions.insert(&F).second)
    return;

  IRPosition FPos = IRPosition::function(F);
  // Check for dead BasicBlocks in every function.
  // We need dead instruction detection because we do not want to deal with
  // broken IR in which SSA rules do not apply.
  getOrCreateAAFor<AAIsDead>(FPos);

  // Every function might be "will-return".
  getOrCreateAAFor<AAWillReturn>(FPos);

  // Every function can be nounwind.
  getOrCreateAAFor<AANoUnwind>(FPos);

  // Every function might be marked "nosync".
  getOrCreateAAFor<AANoSync>(FPos);

  // Every function might be "no-free".
  getOrCreateAAFor<AANoFree>(FPos);

  // Every function might be "no-return".
  getOrCreateAAFor<AANoReturn>(FPos);
  // Return attributes are only appropriate if the return type is non-void.
  Type *ReturnType = F.getReturnType();
  if (!ReturnType->isVoidTy()) {
    // Argument attribute "returned" --- Create only one per function even
    // though it is an argument attribute.
    getOrCreateAAFor<AAReturnedValues>(FPos);

    if (ReturnType->isPointerTy()) {
      IRPosition RetPos = IRPosition::returned(F);

      // Every function with pointer return type might be marked align.
      getOrCreateAAFor<AAAlign>(RetPos);

      // Every function with pointer return type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(RetPos);

      // Every function with pointer return type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(RetPos);

      // Every function with pointer return type might be marked
      // dereferenceable.
      getOrCreateAAFor<AADereferenceable>(RetPos);
    }
  }
  for (Argument &Arg : F.args()) {
    if (Arg.getType()->isPointerTy()) {
      IRPosition ArgPos = IRPosition::argument(Arg);

      // Every argument with pointer type might be marked nonnull.
      getOrCreateAAFor<AANonNull>(ArgPos);

      // Every argument with pointer type might be marked noalias.
      getOrCreateAAFor<AANoAlias>(ArgPos);

      // Every argument with pointer type might be marked dereferenceable.
      getOrCreateAAFor<AADereferenceable>(ArgPos);

      // Every argument with pointer type might be marked align.
      getOrCreateAAFor<AAAlign>(ArgPos);

      // Every argument with pointer type might be marked nocapture.
      getOrCreateAAFor<AANoCapture>(ArgPos);
    }
  }
  // Walk all instructions to find more attribute opportunities and also
  // interesting instructions that might be queried by abstract attributes
  // during their initialization or update.
  auto &ReadOrWriteInsts = InfoCache.FuncRWInstsMap[&F];
  auto &InstOpcodeMap = InfoCache.FuncInstOpcodeMap[&F];

  for (Instruction &I : instructions(&F)) {
    bool IsInterestingOpcode = false;

    // To allow easy access to all instructions in a function with a given
    // opcode we store them in the InfoCache. As not all opcodes are
    // interesting to concrete attributes we only cache the ones that are as
    // identified in the following switch.
    switch (I.getOpcode()) {
    default:
      assert((!ImmutableCallSite(&I)) && (!isa<CallBase>(&I)) &&
             "New call site/base instruction type needs to be known in the "
             "Attributor.");
      break;
    case Instruction::Load:
      // The alignment of a pointer is interesting for loads.
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
      break;
    case Instruction::Store:
      // The alignment of a pointer is interesting for stores.
      getOrCreateAAFor<AAAlign>(
          IRPosition::value(*cast<StoreInst>(I).getPointerOperand()));
      break;
    case Instruction::Call:
    case Instruction::CallBr:
    case Instruction::Invoke:
    case Instruction::CleanupRet:
    case Instruction::CatchSwitch:
    case Instruction::Resume:
    case Instruction::Ret:
      IsInterestingOpcode = true;
      break;
    }

    if (IsInterestingOpcode)
      InstOpcodeMap[I.getOpcode()].push_back(&I);
    if (I.mayReadOrWriteMemory())
      ReadOrWriteInsts.push_back(&I);
    CallSite CS(&I);
    if (CS && CS.getCalledFunction()) {
      for (int i = 0, e = CS.getCalledFunction()->arg_size(); i < e; i++) {
        if (!CS.getArgument(i)->getType()->isPointerTy())
          continue;

        IRPosition CSArgPos = IRPosition::callsite_argument(CS, i);

        // Call site argument attribute "non-null".
        getOrCreateAAFor<AANonNull>(CSArgPos);

        // Call site argument attribute "no-alias".
        getOrCreateAAFor<AANoAlias>(CSArgPos);

        // Call site argument attribute "dereferenceable".
        getOrCreateAAFor<AADereferenceable>(CSArgPos);

        // Call site argument attribute "align".
        getOrCreateAAFor<AAAlign>(CSArgPos);
      }
    }
  }
}
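// Illustrative example of the seeding performed above, for a module
// containing the hypothetical function
//
//   define internal i8* @f(i8* %p) { ... }
//
// identifyDefaultAbstractAttributes(@f) creates AAIsDead, AAWillReturn,
// AANoUnwind, AANoSync, AANoFree, and AANoReturn at the function position;
// AAReturnedValues at the function plus AAAlign, AANonNull, AANoAlias, and
// AADereferenceable at the returned position (pointer return type); and
// AANonNull, AANoAlias, AADereferenceable, AAAlign, and AANoCapture for the
// pointer argument %p. Loads, stores, calls, and returns in the body are
// additionally cached in the InfoCache maps.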
/// Helpers to ease debugging through output streams and print calls.
///
///{
raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
  return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
  switch (AP) {
  case IRPosition::IRP_INVALID:
    return OS << "inv";
  case IRPosition::IRP_FLOAT:
    return OS << "flt";
  case IRPosition::IRP_RETURNED:
    return OS << "fn_ret";
  case IRPosition::IRP_CALL_SITE_RETURNED:
    return OS << "cs_ret";
  case IRPosition::IRP_FUNCTION:
    return OS << "fn";
  case IRPosition::IRP_CALL_SITE:
    return OS << "cs";
  case IRPosition::IRP_ARGUMENT:
    return OS << "arg";
  case IRPosition::IRP_CALL_SITE_ARGUMENT:
    return OS << "cs_arg";
  }
  llvm_unreachable("Unknown attribute position!");
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
  const Value &AV = Pos.getAssociatedValue();
  return OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
            << Pos.getAnchorValue().getName() << "@" << Pos.getArgNo() << "]}";
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerState &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
  return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
}

raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
  AA.print(OS);
  return OS;
}

void AbstractAttribute::print(raw_ostream &OS) const {
  OS << "[P: " << getIRPosition() << "][" << getAsStr() << "][S: " << getState()
     << "]";
}
///}
/// ----------------------------------------------------------------------------
///                       Pass (Manager) Boilerplate
/// ----------------------------------------------------------------------------
static bool runAttributorOnModule(Module &M) {
  if (DisableAttributor)
    return false;

  LLVM_DEBUG(dbgs() << "[Attributor] Run on module with " << M.size()
                    << " functions.\n");

  // Create an Attributor and initially empty information cache that is filled
  // while we identify default attribute opportunities.
  InformationCache InfoCache(M.getDataLayout());
  Attributor A(InfoCache, DepRecInterval);

  for (Function &F : M) {
    if (F.hasExactDefinition())
      NumFnWithExactDefinition++;
    else
      NumFnWithoutExactDefinition++;

    // For now we ignore naked and optnone functions.
    if (F.hasFnAttribute(Attribute::Naked) ||
        F.hasFnAttribute(Attribute::OptimizeNone))
      continue;

    // We look at internal functions only on-demand but if any use is not a
    // direct call, we have to do it eagerly.
    if (F.hasInternalLinkage()) {
      if (llvm::all_of(F.uses(), [](const Use &U) {
            return ImmutableCallSite(U.getUser()) &&
                   ImmutableCallSite(U.getUser()).isCallee(&U);
          }))
        continue;
    }

    // Populate the Attributor with abstract attribute opportunities in the
    // function and the information cache with IR information.
    A.identifyDefaultAbstractAttributes(F);
  }

  return A.run(M) == ChangeStatus::CHANGED;
}
PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
  if (runAttributorOnModule(M)) {
    // FIXME: Think about passes we will preserve and add them here.
    return PreservedAnalyses::none();
  }
  return PreservedAnalyses::all();
}
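// Usage note (illustrative): the legacy pass is registered under the name
// "attributor" (see INITIALIZE_PASS_BEGIN/END at the end of this file), so it
// can be exercised in isolation with, e.g.,
//
//   opt -attributor -S in.ll
//
// subject to the DisableAttributor cl::opt checked in runAttributorOnModule
// above. The new-PM AttributorPass is typically exposed as
// -passes=attributor, though that registration lives in the pass registry,
// not in this file.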
namespace {

struct AttributorLegacyPass : public ModulePass {
  static char ID;

  AttributorLegacyPass() : ModulePass(ID) {
    initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnModule(Module &M) override {
    if (skipModule(M))
      return false;
    return runAttributorOnModule(M);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    // FIXME: Think about passes we will preserve and add them here.
  }
};

} // end anonymous namespace
Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }

char AttributorLegacyPass::ID = 0;
const char AAReturnedValues::ID = 0;
const char AANoUnwind::ID = 0;
const char AANoSync::ID = 0;
const char AANoFree::ID = 0;
const char AANonNull::ID = 0;
const char AANoRecurse::ID = 0;
const char AAWillReturn::ID = 0;
const char AANoAlias::ID = 0;
const char AANoReturn::ID = 0;
const char AAIsDead::ID = 0;
const char AADereferenceable::ID = 0;
const char AAAlign::ID = 0;
const char AANoCapture::ID = 0;
// Macro magic to create the static generator function for attributes that
// follow the naming scheme.

#define SWITCH_PK_INV(CLASS, PK, POS_NAME)                                     \
  case IRPosition::PK:                                                         \
    llvm_unreachable("Cannot create " #CLASS " for a " POS_NAME " position!");

#define SWITCH_PK_CREATE(CLASS, IRP, PK, SUFFIX)                               \
  case IRPosition::PK:                                                         \
    AA = new CLASS##SUFFIX(IRP);                                               \
    break;

#define CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                 \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FLOAT, "floating")                              \
      SWITCH_PK_INV(CLASS, IRP_ARGUMENT, "argument")                           \
      SWITCH_PK_INV(CLASS, IRP_RETURNED, "returned")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_RETURNED, "call site returned")       \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE_ARGUMENT, "call site argument")       \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FUNCTION, Function)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE, CallSite)                    \
    }                                                                          \
    return *AA;                                                                \
  }

#define CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(CLASS)                    \
  CLASS &CLASS::createForPosition(const IRPosition &IRP, Attributor &A) {      \
    CLASS *AA = nullptr;                                                       \
    switch (IRP.getPositionKind()) {                                           \
      SWITCH_PK_INV(CLASS, IRP_INVALID, "invalid")                             \
      SWITCH_PK_INV(CLASS, IRP_FUNCTION, "function")                           \
      SWITCH_PK_INV(CLASS, IRP_CALL_SITE, "call site")                         \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_FLOAT, Floating)                        \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_ARGUMENT, Argument)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_RETURNED, Returned)                     \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_RETURNED, CallSiteReturned)   \
      SWITCH_PK_CREATE(CLASS, IRP, IRP_CALL_SITE_ARGUMENT, CallSiteArgument)   \
    }                                                                          \
    return *AA;                                                                \
  }

CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoSync)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoFree)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoRecurse)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAWillReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoReturn)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAIsDead)
CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAReturnedValues)

CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANonNull)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoAlias)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AADereferenceable)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AAAlign)
CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoCapture)

#undef CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef CREATE_VALUE_ABSTRACT_ATTRIBUTE_FOR_POSITION
#undef SWITCH_PK_CREATE
#undef SWITCH_PK_INV
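// For reference, a hand-written expansion sketch of the macro machinery
// above: CREATE_FUNCTION_ABSTRACT_ATTRIBUTE_FOR_POSITION(AANoUnwind)
// produces roughly
//
//   AANoUnwind &AANoUnwind::createForPosition(const IRPosition &IRP,
//                                             Attributor &A) {
//     AANoUnwind *AA = nullptr;
//     switch (IRP.getPositionKind()) {
//     case IRPosition::IRP_INVALID:
//       llvm_unreachable("Cannot create AANoUnwind for a invalid position!");
//     // ... other SWITCH_PK_INV cases ...
//     case IRPosition::IRP_FUNCTION:
//       AA = new AANoUnwindFunction(IRP);
//       break;
//     case IRPosition::IRP_CALL_SITE:
//       AA = new AANoUnwindCallSite(IRP);
//       break;
//     }
//     return *AA;
//   }
//
// i.e., each position kind either aborts (SWITCH_PK_INV) or instantiates the
// concrete subclass named CLASS##SUFFIX (SWITCH_PK_CREATE).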
INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
                      "Deduce and propagate attributes", false, false)
INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
                    "Deduce and propagate attributes", false, false)