//===- CFLAndersAliasAnalysis.cpp - Inclusion-based Alias Analysis -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a CFL-based, summary-based alias analysis algorithm. It
// differs from CFLSteensAliasAnalysis in its inclusion-based nature while
// CFLSteensAliasAnalysis is unification-based. This pass has worse performance
// than CFLSteensAliasAnalysis (the worst-case complexity of
// CFLAndersAliasAnalysis is cubic, while the worst-case complexity of
// CFLSteensAliasAnalysis is almost linear), but it is able to yield more
// precise analysis results. The precision of this analysis is roughly the same
// as that of a one-level context-sensitive Andersen's algorithm.
//
// The algorithm used here is based on the recursive state machine matching
// scheme proposed in "Demand-driven alias analysis for C" by Xin Zheng and
// Radu Rugina. The general idea is to extend the traditional transitive-closure
// algorithm to perform CFL matching along the way: instead of recording
// "whether X is reachable from Y", we keep track of "whether X is reachable
// from Y at state Z", where the "state" field indicates where we are in the CFL
// matching process. To understand the matching better, it is advisable to have
// the state machine shown in Figure 3 of the paper available when reading the
// code: all we do here is selectively expand the transitive closure by
// discarding edges that are not recognized by the state machine.
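//
// For illustration only (this example is ours, not from the paper): given the
// C snippet
//   a = &b; c = a;
// the CFLGraph contains an assignment edge from 'a' to 'c', so the analysis
// records that 'c' is reachable from 'a' at an assignment-matching state, and
// one dereference level below it can then conclude that *a and *c (and hence
// 'b') may alias.
//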
// There are two differences between our current implementation and the one
// described in the paper:
// - Our algorithm eagerly computes all alias pairs after the CFLGraph is built,
// while in the paper the authors did the computation in a demand-driven
// fashion. We did not implement the demand-driven algorithm due to the
// additional coding complexity and higher memory profile, but if we find it
// necessary we may switch to it eventually.
// - In the paper the authors use a state machine that does not distinguish
// value reads from value writes. For example, if Y is reachable from X at state
// S3, it may be the case that X is written into Y, or it may be the case that
// there's a third value Z that writes into both X and Y. To make that
// distinction (which is crucial in building the function summary as well as in
// retrieving mod-ref info), we choose to duplicate some of the states in the
// paper's proposed state machine. The duplication does not change the set the
// machine accepts. Given a pair of reachable values, it only provides more
// detailed information on which value is being written into and which is being
// read from.
//
//===----------------------------------------------------------------------===//

// N.B. AliasAnalysis as a whole is phrased as a FunctionPass at the moment, and
// CFLAndersAA is interprocedural. This is *technically* A Bad Thing, because
// FunctionPasses are only allowed to inspect the Function that they're being
// run on. Realistically, this likely isn't a problem until we allow
// FunctionPasses to run concurrently.

#include "llvm/Analysis/CFLAndersAliasAnalysis.h"
#include "AliasAnalysisSummary.h"
#include "CFLGraph.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include <algorithm>
#include <bitset>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

using namespace llvm;
using namespace llvm::cflaa;

#define DEBUG_TYPE "cfl-anders-aa"

CFLAndersAAResult::CFLAndersAAResult(
    std::function<const TargetLibraryInfo &(Function &F)> GetTLI)
    : GetTLI(std::move(GetTLI)) {}
CFLAndersAAResult::CFLAndersAAResult(CFLAndersAAResult &&RHS)
    : AAResultBase(std::move(RHS)), GetTLI(std::move(RHS.GetTLI)) {}
CFLAndersAAResult::~CFLAndersAAResult() = default;

namespace {

enum class MatchState : uint8_t {
  // The following state represents S1 in the paper.
  FlowFromReadOnly = 0,
  // The following two states together represent S2 in the paper.
  // The 'NoReadWrite' suffix indicates that there exists an alias path that
  // contains neither assignment nor reverse-assignment edges.
  // The 'ReadOnly' suffix indicates that there exists an alias path that
  // contains reverse-assignment edges only.
  FlowFromMemAliasNoReadWrite,
  FlowFromMemAliasReadOnly,
  // The following two states together represent S3 in the paper.
  // The 'WriteOnly' suffix indicates that there exists an alias path that
  // contains assignment edges only.
  // The 'ReadWrite' suffix indicates that there exists an alias path that
  // contains both assignment and reverse-assignment edges. Note that if X and Y
  // are reachable at the 'ReadWrite' state, it does NOT mean that X is both
  // read from and written to by Y. Instead, it means that a third value Z is
  // written into both X and Y.
  FlowToWriteOnly,
  FlowToReadWrite,
  // The following two states together represent S4 in the paper.
  FlowToMemAliasWriteOnly,
  FlowToMemAliasReadWrite,
};
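
// Note: there are seven MatchState values in total, which is why StateSet
// below is declared as std::bitset<7> (one bit per state).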

using StateSet = std::bitset<7>;

const unsigned ReadOnlyStateMask =
    (1U << static_cast<uint8_t>(MatchState::FlowFromReadOnly)) |
    (1U << static_cast<uint8_t>(MatchState::FlowFromMemAliasReadOnly));
const unsigned WriteOnlyStateMask =
    (1U << static_cast<uint8_t>(MatchState::FlowToWriteOnly)) |
    (1U << static_cast<uint8_t>(MatchState::FlowToMemAliasWriteOnly));

// A pair that consists of a value and an offset
struct OffsetValue {
  const Value *Val;
  int64_t Offset;
};

bool operator==(OffsetValue LHS, OffsetValue RHS) {
  return LHS.Val == RHS.Val && LHS.Offset == RHS.Offset;
}
bool operator<(OffsetValue LHS, OffsetValue RHS) {
  return std::less<const Value *>()(LHS.Val, RHS.Val) ||
         (LHS.Val == RHS.Val && LHS.Offset < RHS.Offset);
}
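
// Note: std::less<const Value *> is used above rather than a raw '<' on the
// pointers because std::less is guaranteed to impose a total order even on
// pointers into unrelated objects, whereas a raw '<' comparison on such
// pointers is unspecified.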

// A pair that consists of an InstantiatedValue and an offset
struct OffsetInstantiatedValue {
  InstantiatedValue IVal;
  int64_t Offset;
};

bool operator==(OffsetInstantiatedValue LHS, OffsetInstantiatedValue RHS) {
  return LHS.IVal == RHS.IVal && LHS.Offset == RHS.Offset;
}

// We use ReachabilitySet to keep track of value aliases (the nonterminal "V" in
// the paper) during the analysis.
class ReachabilitySet {
  using ValueStateMap = DenseMap<InstantiatedValue, StateSet>;
  using ValueReachMap = DenseMap<InstantiatedValue, ValueStateMap>;

  ValueReachMap ReachMap;

public:
  using const_valuestate_iterator = ValueStateMap::const_iterator;
  using const_value_iterator = ValueReachMap::const_iterator;

  // Insert edge 'From->To' at state 'State'
  bool insert(InstantiatedValue From, InstantiatedValue To, MatchState State) {
    assert(From != To);
    auto &States = ReachMap[To][From];
    auto Idx = static_cast<size_t>(State);
    if (!States.test(Idx)) {
      States.set(Idx);
      return true;
    }
    return false;
  }

  // Return the set of all ('From', 'State') pairs for a given node 'To'
  iterator_range<const_valuestate_iterator>
  reachableValueAliases(InstantiatedValue V) const {
    auto Itr = ReachMap.find(V);
    if (Itr == ReachMap.end())
      return make_range<const_valuestate_iterator>(const_valuestate_iterator(),
                                                   const_valuestate_iterator());
    return make_range<const_valuestate_iterator>(Itr->second.begin(),
                                                 Itr->second.end());
  }

  iterator_range<const_value_iterator> value_mappings() const {
    return make_range<const_value_iterator>(ReachMap.begin(), ReachMap.end());
  }
};

// We use AliasMemSet to keep track of all memory aliases (the nonterminal "M"
// in the paper) during the analysis.
class AliasMemSet {
  using MemSet = DenseSet<InstantiatedValue>;
  using MemMapType = DenseMap<InstantiatedValue, MemSet>;

  MemMapType MemMap;

public:
  using const_mem_iterator = MemSet::const_iterator;

  bool insert(InstantiatedValue LHS, InstantiatedValue RHS) {
    // Top-level values can never be memory aliases because one cannot take
    // their addresses.
    assert(LHS.DerefLevel > 0 && RHS.DerefLevel > 0);
    return MemMap[LHS].insert(RHS).second;
  }

  const MemSet *getMemoryAliases(InstantiatedValue V) const {
    auto Itr = MemMap.find(V);
    if (Itr == MemMap.end())
      return nullptr;
    return &Itr->second;
  }
};

// We use AliasAttrMap to keep track of the AliasAttr of each node.
class AliasAttrMap {
  using MapType = DenseMap<InstantiatedValue, AliasAttrs>;

  MapType AttrMap;

public:
  using const_iterator = MapType::const_iterator;

  bool add(InstantiatedValue V, AliasAttrs Attr) {
    auto &OldAttr = AttrMap[V];
    auto NewAttr = OldAttr | Attr;
    if (OldAttr == NewAttr)
      return false;
    OldAttr = NewAttr;
    return true;
  }

  AliasAttrs getAttrs(InstantiatedValue V) const {
    AliasAttrs Attr;
    auto Itr = AttrMap.find(V);
    if (Itr != AttrMap.end())
      Attr = Itr->second;
    return Attr;
  }

  iterator_range<const_iterator> mappings() const {
    return make_range<const_iterator>(AttrMap.begin(), AttrMap.end());
  }
};

struct WorkListItem {
  InstantiatedValue From;
  InstantiatedValue To;
  MatchState State;
};

struct ValueSummary {
  struct Record {
    InterfaceValue IValue;
    unsigned DerefLevel;
  };
  SmallVector<Record, 4> FromRecords, ToRecords;
};

} // end anonymous namespace

namespace llvm {

// Specialize DenseMapInfo for OffsetValue.
template <> struct DenseMapInfo<OffsetValue> {
  static OffsetValue getEmptyKey() {
    return OffsetValue{DenseMapInfo<const Value *>::getEmptyKey(),
                       DenseMapInfo<int64_t>::getEmptyKey()};
  }

  static OffsetValue getTombstoneKey() {
    return OffsetValue{DenseMapInfo<const Value *>::getTombstoneKey(),
                       DenseMapInfo<int64_t>::getEmptyKey()};
  }

  static unsigned getHashValue(const OffsetValue &OVal) {
    return DenseMapInfo<std::pair<const Value *, int64_t>>::getHashValue(
        std::make_pair(OVal.Val, OVal.Offset));
  }

  static bool isEqual(const OffsetValue &LHS, const OffsetValue &RHS) {
    return LHS == RHS;
  }
};

// Specialize DenseMapInfo for OffsetInstantiatedValue.
template <> struct DenseMapInfo<OffsetInstantiatedValue> {
  static OffsetInstantiatedValue getEmptyKey() {
    return OffsetInstantiatedValue{
        DenseMapInfo<InstantiatedValue>::getEmptyKey(),
        DenseMapInfo<int64_t>::getEmptyKey()};
  }

  static OffsetInstantiatedValue getTombstoneKey() {
    return OffsetInstantiatedValue{
        DenseMapInfo<InstantiatedValue>::getTombstoneKey(),
        DenseMapInfo<int64_t>::getEmptyKey()};
  }

  static unsigned getHashValue(const OffsetInstantiatedValue &OVal) {
    return DenseMapInfo<std::pair<InstantiatedValue, int64_t>>::getHashValue(
        std::make_pair(OVal.IVal, OVal.Offset));
  }

  static bool isEqual(const OffsetInstantiatedValue &LHS,
                      const OffsetInstantiatedValue &RHS) {
    return LHS == RHS;
  }
};

} // end namespace llvm

class CFLAndersAAResult::FunctionInfo {
  /// Map a value to other values that may alias it.
  /// Since the alias relation is symmetric, to save some space we assume values
  /// are properly ordered: if a and b alias each other, and a < b, then b is in
  /// AliasMap[a] but not vice versa.
  DenseMap<const Value *, std::vector<OffsetValue>> AliasMap;

  /// Map a value to its corresponding AliasAttrs
  DenseMap<const Value *, AliasAttrs> AttrMap;

  /// Summary of externally visible effects.
  AliasSummary Summary;

  Optional<AliasAttrs> getAttrs(const Value *) const;

public:
  FunctionInfo(const Function &, const SmallVectorImpl<Value *> &,
               const ReachabilitySet &, const AliasAttrMap &);

  bool mayAlias(const Value *, LocationSize, const Value *, LocationSize) const;
  const AliasSummary &getAliasSummary() const { return Summary; }
};

static bool hasReadOnlyState(StateSet Set) {
  return (Set & StateSet(ReadOnlyStateMask)).any();
}

static bool hasWriteOnlyState(StateSet Set) {
  return (Set & StateSet(WriteOnlyStateMask)).any();
}

static Optional<InterfaceValue>
getInterfaceValue(InstantiatedValue IValue,
                  const SmallVectorImpl<Value *> &RetVals) {
  auto Val = IValue.Val;

  Optional<unsigned> Index;
  if (auto Arg = dyn_cast<Argument>(Val))
    Index = Arg->getArgNo() + 1;
  else if (is_contained(RetVals, Val))
    Index = 0;

  if (Index)
    return InterfaceValue{*Index, IValue.DerefLevel};
  return None;
}

static void populateAttrMap(DenseMap<const Value *, AliasAttrs> &AttrMap,
                            const AliasAttrMap &AMap) {
  for (const auto &Mapping : AMap.mappings()) {
    auto IVal = Mapping.first;

    // Insert IVal into the map
    auto &Attr = AttrMap[IVal.Val];
    // AttrMap only cares about top-level values
    if (IVal.DerefLevel == 0)
      Attr |= Mapping.second;
  }
}

static void
populateAliasMap(DenseMap<const Value *, std::vector<OffsetValue>> &AliasMap,
                 const ReachabilitySet &ReachSet) {
  for (const auto &OuterMapping : ReachSet.value_mappings()) {
    // AliasMap only cares about top-level values
    if (OuterMapping.first.DerefLevel > 0)
      continue;

    auto Val = OuterMapping.first.Val;
    auto &AliasList = AliasMap[Val];
    for (const auto &InnerMapping : OuterMapping.second) {
      // Again, AliasMap only cares about top-level values
      if (InnerMapping.first.DerefLevel == 0)
        AliasList.push_back(OffsetValue{InnerMapping.first.Val, UnknownOffset});
    }

    // Sort AliasList for faster lookup
    llvm::sort(AliasList);
  }
}

static void populateExternalRelations(
    SmallVectorImpl<ExternalRelation> &ExtRelations, const Function &Fn,
    const SmallVectorImpl<Value *> &RetVals, const ReachabilitySet &ReachSet) {
  // If a function only returns one of its arguments X, then X will be both an
  // argument and a return value at the same time. This is an edge case that
  // needs special handling here.
  for (const auto &Arg : Fn.args()) {
    if (is_contained(RetVals, &Arg)) {
      auto ArgVal = InterfaceValue{Arg.getArgNo() + 1, 0};
      auto RetVal = InterfaceValue{0, 0};
      ExtRelations.push_back(ExternalRelation{ArgVal, RetVal, 0});
    }
  }

  // Below is the core summary construction logic.
  // A naive solution of adding only the value aliases that are parameters or
  // return values in ReachSet to the summary won't work: It is possible that a
  // parameter P is written into an intermediate value I, and the function
  // subsequently returns *I. In that case, *I does not value-alias anything
  // in ReachSet, and the naive solution will miss a summary edge from (P, 1) to
  // (I, 1).
  // To account for the aforementioned case, we need to check each non-parameter
  // and non-return value for the possibility of acting as an intermediate.
  // 'ValueMap' here records, for each value, which InterfaceValues read from or
  // write into it. If both the read list and the write list of a given value
  // are non-empty, we know that this value is an intermediate and we need to
  // add summary edges from the writes to the reads.
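  //
  // A hedged illustration of that intermediate-value case (this snippet is
  // ours, not taken from the paper or from a test):
  //
  //   int *f(int *P) {
  //     int **I = ...;   // I is neither a parameter nor a return value
  //     *I = P;          // P is written into the location I points to
  //     return *I;       // ... and *I is what the function returns
  //   }
  //
  // Here *I never shows up as a parameter or return value itself, so only by
  // treating I as an intermediate can we connect the write of P to the read
  // that feeds the return value.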
  DenseMap<Value *, ValueSummary> ValueMap;
  for (const auto &OuterMapping : ReachSet.value_mappings()) {
    if (auto Dst = getInterfaceValue(OuterMapping.first, RetVals)) {
      for (const auto &InnerMapping : OuterMapping.second) {
        // If Src is a param/return value, we get a same-level assignment.
        if (auto Src = getInterfaceValue(InnerMapping.first, RetVals)) {
          // This may happen if both Dst and Src are return values
          if (*Dst == *Src)
            continue;

          if (hasReadOnlyState(InnerMapping.second))
            ExtRelations.push_back(ExternalRelation{*Dst, *Src, UnknownOffset});
          // No need to check for WriteOnly state, since ReachSet is symmetric
        } else {
          // If Src is not a param/return, add it to ValueMap
          auto SrcIVal = InnerMapping.first;
          if (hasReadOnlyState(InnerMapping.second))
            ValueMap[SrcIVal.Val].FromRecords.push_back(
                ValueSummary::Record{*Dst, SrcIVal.DerefLevel});
          if (hasWriteOnlyState(InnerMapping.second))
            ValueMap[SrcIVal.Val].ToRecords.push_back(
                ValueSummary::Record{*Dst, SrcIVal.DerefLevel});
        }
      }
    }
  }

  for (const auto &Mapping : ValueMap) {
    for (const auto &FromRecord : Mapping.second.FromRecords) {
      for (const auto &ToRecord : Mapping.second.ToRecords) {
        auto ToLevel = ToRecord.DerefLevel;
        auto FromLevel = FromRecord.DerefLevel;
        // Same-level assignments should have already been processed by now
        if (ToLevel == FromLevel)
          continue;

        auto SrcIndex = FromRecord.IValue.Index;
        auto SrcLevel = FromRecord.IValue.DerefLevel;
        auto DstIndex = ToRecord.IValue.Index;
        auto DstLevel = ToRecord.IValue.DerefLevel;
        if (ToLevel > FromLevel)
          SrcLevel += ToLevel - FromLevel;
        else
          DstLevel += FromLevel - ToLevel;

        ExtRelations.push_back(ExternalRelation{
            InterfaceValue{SrcIndex, SrcLevel},
            InterfaceValue{DstIndex, DstLevel}, UnknownOffset});
      }
    }
  }

  // Remove duplicates in ExtRelations
  llvm::sort(ExtRelations);
  ExtRelations.erase(std::unique(ExtRelations.begin(), ExtRelations.end()),
                     ExtRelations.end());
}

static void populateExternalAttributes(
    SmallVectorImpl<ExternalAttribute> &ExtAttributes, const Function &Fn,
    const SmallVectorImpl<Value *> &RetVals, const AliasAttrMap &AMap) {
  for (const auto &Mapping : AMap.mappings()) {
    if (auto IVal = getInterfaceValue(Mapping.first, RetVals)) {
      auto Attr = getExternallyVisibleAttrs(Mapping.second);
      if (Attr.any())
        ExtAttributes.push_back(ExternalAttribute{*IVal, Attr});
    }
  }
}

CFLAndersAAResult::FunctionInfo::FunctionInfo(
    const Function &Fn, const SmallVectorImpl<Value *> &RetVals,
    const ReachabilitySet &ReachSet, const AliasAttrMap &AMap) {
  populateAttrMap(AttrMap, AMap);
  populateExternalAttributes(Summary.RetParamAttributes, Fn, RetVals, AMap);
  populateAliasMap(AliasMap, ReachSet);
  populateExternalRelations(Summary.RetParamRelations, Fn, RetVals, ReachSet);
}

Optional<AliasAttrs>
CFLAndersAAResult::FunctionInfo::getAttrs(const Value *V) const {
  assert(V != nullptr);

  auto Itr = AttrMap.find(V);
  if (Itr != AttrMap.end())
    return Itr->second;
  return None;
}

bool CFLAndersAAResult::FunctionInfo::mayAlias(
    const Value *LHS, LocationSize MaybeLHSSize, const Value *RHS,
    LocationSize MaybeRHSSize) const {
  assert(LHS && RHS);

  // Check if we've seen LHS and RHS before. Sometimes LHS or RHS can be created
  // after the analysis gets executed, and we want to be conservative in those
  // cases.
  auto MaybeAttrsA = getAttrs(LHS);
  auto MaybeAttrsB = getAttrs(RHS);
  if (!MaybeAttrsA || !MaybeAttrsB)
    return true;

  // Check AliasAttrs before AliasMap lookup since it's cheaper
  auto AttrsA = *MaybeAttrsA;
  auto AttrsB = *MaybeAttrsB;
  if (hasUnknownOrCallerAttr(AttrsA))
    return AttrsB.any();
  if (hasUnknownOrCallerAttr(AttrsB))
    return AttrsA.any();
  if (isGlobalOrArgAttr(AttrsA))
    return isGlobalOrArgAttr(AttrsB);
  if (isGlobalOrArgAttr(AttrsB))
    return isGlobalOrArgAttr(AttrsA);

  // At this point both LHS and RHS should point to locally allocated objects

  auto Itr = AliasMap.find(LHS);
  if (Itr != AliasMap.end()) {

    // Find out all (X, Offset) where X == RHS
    auto Comparator = [](OffsetValue LHS, OffsetValue RHS) {
      return std::less<const Value *>()(LHS.Val, RHS.Val);
    };
#ifdef EXPENSIVE_CHECKS
    assert(std::is_sorted(Itr->second.begin(), Itr->second.end(), Comparator));
#endif
    auto RangePair = std::equal_range(Itr->second.begin(), Itr->second.end(),
                                      OffsetValue{RHS, 0}, Comparator);

    if (RangePair.first != RangePair.second) {
      // Be conservative about unknown sizes
      if (MaybeLHSSize == LocationSize::unknown() ||
          MaybeRHSSize == LocationSize::unknown())
        return true;

      const uint64_t LHSSize = MaybeLHSSize.getValue();
      const uint64_t RHSSize = MaybeRHSSize.getValue();

      for (const auto &OVal : make_range(RangePair)) {
        // Be conservative about UnknownOffset
        if (OVal.Offset == UnknownOffset)
          return true;

        // We know that LHS aliases (RHS + OVal.Offset) if control flow reaches
        // here. The may-alias query essentially becomes an integer
        // range-overlap query over the two ranges [OVal.Offset, OVal.Offset +
        // LHSSize) and [0, RHSSize).
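        //
        // A small worked example (ours, for clarity): with OVal.Offset == 8,
        // LHSSize == 4 and RHSSize == 16, the ranges are [8, 12) and [0, 16),
        // which overlap, so we conservatively answer "may alias". With
        // RHSSize == 8 the ranges [8, 12) and [0, 8) are disjoint and this
        // particular candidate is skipped.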

        // Try to be conservative on super large offsets
        if (LLVM_UNLIKELY(LHSSize > INT64_MAX || RHSSize > INT64_MAX))
          return true;

        auto LHSStart = OVal.Offset;
        // FIXME: Do we need to guard against integer overflow?
        auto LHSEnd = OVal.Offset + static_cast<int64_t>(LHSSize);
        auto RHSStart = 0;
        auto RHSEnd = static_cast<int64_t>(RHSSize);
        if (LHSEnd > RHSStart && LHSStart < RHSEnd)
          return true;
      }
    }
  }

  return false;
}

static void propagate(InstantiatedValue From, InstantiatedValue To,
                      MatchState State, ReachabilitySet &ReachSet,
                      std::vector<WorkListItem> &WorkList) {
  if (From == To)
    return;
  if (ReachSet.insert(From, To, State))
    WorkList.push_back(WorkListItem{From, To, State});
}

static void initializeWorkList(std::vector<WorkListItem> &WorkList,
                               ReachabilitySet &ReachSet,
                               const CFLGraph &Graph) {
  for (const auto &Mapping : Graph.value_mappings()) {
    auto Val = Mapping.first;
    auto &ValueInfo = Mapping.second;
    assert(ValueInfo.getNumLevels() > 0);

    // Insert all immediate assignment neighbors to the worklist
    for (unsigned I = 0, E = ValueInfo.getNumLevels(); I < E; ++I) {
      auto Src = InstantiatedValue{Val, I};
      // If there's an assignment edge from X to Y, it means Y is reachable from
      // X at S3 and X is reachable from Y at S1
      for (auto &Edge : ValueInfo.getNodeInfoAtLevel(I).Edges) {
        propagate(Edge.Other, Src, MatchState::FlowFromReadOnly, ReachSet,
                  WorkList);
        propagate(Src, Edge.Other, MatchState::FlowToWriteOnly, ReachSet,
                  WorkList);
      }
    }
  }
}

static Optional<InstantiatedValue> getNodeBelow(const CFLGraph &Graph,
                                                InstantiatedValue V) {
  auto NodeBelow = InstantiatedValue{V.Val, V.DerefLevel + 1};
  if (Graph.getNode(NodeBelow))
    return NodeBelow;
  return None;
}

static void processWorkListItem(const WorkListItem &Item, const CFLGraph &Graph,
                                ReachabilitySet &ReachSet, AliasMemSet &MemSet,
                                std::vector<WorkListItem> &WorkList) {
  auto FromNode = Item.From;
  auto ToNode = Item.To;

  auto NodeInfo = Graph.getNode(ToNode);
  assert(NodeInfo != nullptr);

  // TODO: propagate field offsets

  // FIXME: Here is a neat trick we can do: since both ReachSet and MemSet hold
  // relations that are symmetric, we could actually cut the storage by half by
  // sorting FromNode and ToNode before insertion happens.

  // The newly added value alias pair may potentially generate more memory
  // alias pairs. Check for them here.
  auto FromNodeBelow = getNodeBelow(Graph, FromNode);
  auto ToNodeBelow = getNodeBelow(Graph, ToNode);
  if (FromNodeBelow && ToNodeBelow &&
      MemSet.insert(*FromNodeBelow, *ToNodeBelow)) {
    propagate(*FromNodeBelow, *ToNodeBelow,
              MatchState::FlowFromMemAliasNoReadWrite, ReachSet, WorkList);
    for (const auto &Mapping : ReachSet.reachableValueAliases(*FromNodeBelow)) {
      auto Src = Mapping.first;
      auto MemAliasPropagate = [&](MatchState FromState, MatchState ToState) {
        if (Mapping.second.test(static_cast<size_t>(FromState)))
          propagate(Src, *ToNodeBelow, ToState, ReachSet, WorkList);
      };

      MemAliasPropagate(MatchState::FlowFromReadOnly,
                        MatchState::FlowFromMemAliasReadOnly);
      MemAliasPropagate(MatchState::FlowToWriteOnly,
                        MatchState::FlowToMemAliasWriteOnly);
      MemAliasPropagate(MatchState::FlowToReadWrite,
                        MatchState::FlowToMemAliasReadWrite);
    }
  }

  // This is the core of the state machine walking algorithm. We expand ReachSet
  // based on which state we are at (which in turn dictates what edges we
  // should examine).
  // From a high-level point of view, the state machine here guarantees two
  // properties:
  // - If *X and *Y are memory aliases, then X and Y are value aliases.
  // - If Y is an alias of X, then reverse-assignment edges (if there are any)
  //   should precede any assignment edges on the path from X to Y.
  auto NextAssignState = [&](MatchState State) {
    for (const auto &AssignEdge : NodeInfo->Edges)
      propagate(FromNode, AssignEdge.Other, State, ReachSet, WorkList);
  };
  auto NextRevAssignState = [&](MatchState State) {
    for (const auto &RevAssignEdge : NodeInfo->ReverseEdges)
      propagate(FromNode, RevAssignEdge.Other, State, ReachSet, WorkList);
  };
  auto NextMemState = [&](MatchState State) {
    if (auto AliasSet = MemSet.getMemoryAliases(ToNode)) {
      for (const auto &MemAlias : *AliasSet)
        propagate(FromNode, MemAlias, State, ReachSet, WorkList);
    }
  };

  switch (Item.State) {
  case MatchState::FlowFromReadOnly:
    NextRevAssignState(MatchState::FlowFromReadOnly);
    NextAssignState(MatchState::FlowToReadWrite);
    NextMemState(MatchState::FlowFromMemAliasReadOnly);
    break;

  case MatchState::FlowFromMemAliasNoReadWrite:
    NextRevAssignState(MatchState::FlowFromReadOnly);
    NextAssignState(MatchState::FlowToWriteOnly);
    break;

  case MatchState::FlowFromMemAliasReadOnly:
    NextRevAssignState(MatchState::FlowFromReadOnly);
    NextAssignState(MatchState::FlowToReadWrite);
    break;

  case MatchState::FlowToWriteOnly:
    NextAssignState(MatchState::FlowToWriteOnly);
    NextMemState(MatchState::FlowToMemAliasWriteOnly);
    break;

  case MatchState::FlowToReadWrite:
    NextAssignState(MatchState::FlowToReadWrite);
    NextMemState(MatchState::FlowToMemAliasReadWrite);
    break;

  case MatchState::FlowToMemAliasWriteOnly:
    NextAssignState(MatchState::FlowToWriteOnly);
    break;

  case MatchState::FlowToMemAliasReadWrite:
    NextAssignState(MatchState::FlowToReadWrite);
    break;
  }
}

static AliasAttrMap buildAttrMap(const CFLGraph &Graph,
                                 const ReachabilitySet &ReachSet) {
  AliasAttrMap AttrMap;
  std::vector<InstantiatedValue> WorkList, NextList;

  // Initialize each node with its original AliasAttrs in CFLGraph
  for (const auto &Mapping : Graph.value_mappings()) {
    auto Val = Mapping.first;
    auto &ValueInfo = Mapping.second;
    for (unsigned I = 0, E = ValueInfo.getNumLevels(); I < E; ++I) {
      auto Node = InstantiatedValue{Val, I};
      AttrMap.add(Node, ValueInfo.getNodeInfoAtLevel(I).Attr);
      WorkList.push_back(Node);
    }
  }

  while (!WorkList.empty()) {
    for (const auto &Dst : WorkList) {
      auto DstAttr = AttrMap.getAttrs(Dst);
      if (DstAttr.none())
        continue;

      // Propagate attr on the same level
      for (const auto &Mapping : ReachSet.reachableValueAliases(Dst)) {
        auto Src = Mapping.first;
        if (AttrMap.add(Src, DstAttr))
          NextList.push_back(Src);
      }

      // Propagate attr to the levels below
      auto DstBelow = getNodeBelow(Graph, Dst);
      while (DstBelow) {
        if (AttrMap.add(*DstBelow, DstAttr)) {
          NextList.push_back(*DstBelow);
          break;
        }
        DstBelow = getNodeBelow(Graph, *DstBelow);
      }
    }
    WorkList.swap(NextList);
    NextList.clear();
  }

  return AttrMap;
}

CFLAndersAAResult::FunctionInfo
CFLAndersAAResult::buildInfoFrom(const Function &Fn) {
  CFLGraphBuilder<CFLAndersAAResult> GraphBuilder(
      *this, GetTLI(const_cast<Function &>(Fn)),
      // Cast away the constness here due to GraphBuilder's API requirement
      const_cast<Function &>(Fn));
  auto &Graph = GraphBuilder.getCFLGraph();

  ReachabilitySet ReachSet;
  AliasMemSet MemSet;

  std::vector<WorkListItem> WorkList, NextList;
  initializeWorkList(WorkList, ReachSet, Graph);
  // TODO: make sure we don't stop before the fixpoint is reached
  while (!WorkList.empty()) {
    for (const auto &Item : WorkList)
      processWorkListItem(Item, Graph, ReachSet, MemSet, NextList);

    NextList.swap(WorkList);
    NextList.clear();
  }

  // Now that we have all the reachability info, propagate AliasAttrs according
  // to it
  auto IValueAttrMap = buildAttrMap(Graph, ReachSet);

  return FunctionInfo(Fn, GraphBuilder.getReturnValues(), ReachSet,
                      std::move(IValueAttrMap));
}

void CFLAndersAAResult::scan(const Function &Fn) {
  auto InsertPair = Cache.insert(std::make_pair(&Fn, Optional<FunctionInfo>()));
  (void)InsertPair;
  assert(InsertPair.second &&
         "Trying to scan a function that has already been cached");

  // Note that we can't do Cache[Fn] = buildInfoFrom(Fn) here: the function call
  // may get evaluated after operator[], potentially triggering a DenseMap
  // resize and invalidating the reference returned by operator[]
  auto FunInfo = buildInfoFrom(Fn);
  Cache[&Fn] = std::move(FunInfo);
  Handles.emplace_front(const_cast<Function *>(&Fn), this);
}

void CFLAndersAAResult::evict(const Function *Fn) { Cache.erase(Fn); }

const Optional<CFLAndersAAResult::FunctionInfo> &
CFLAndersAAResult::ensureCached(const Function &Fn) {
  auto Iter = Cache.find(&Fn);
  if (Iter == Cache.end()) {
    scan(Fn);
    Iter = Cache.find(&Fn);
    assert(Iter != Cache.end());
    assert(Iter->second.hasValue());
  }
  return Iter->second;
}

const AliasSummary *CFLAndersAAResult::getAliasSummary(const Function &Fn) {
  auto &FunInfo = ensureCached(Fn);
  if (FunInfo.hasValue())
    return &FunInfo->getAliasSummary();
  else
    return nullptr;
}

AliasResult CFLAndersAAResult::query(const MemoryLocation &LocA,
                                     const MemoryLocation &LocB) {
  auto *ValA = LocA.Ptr;
  auto *ValB = LocB.Ptr;

  if (!ValA->getType()->isPointerTy() || !ValB->getType()->isPointerTy())
    return NoAlias;

  auto *Fn = parentFunctionOfValue(ValA);
  if (!Fn) {
    Fn = parentFunctionOfValue(ValB);
    if (!Fn) {
      // The only times this is known to happen are when globals + InlineAsm are
      // involved
      LLVM_DEBUG(
          dbgs()
          << "CFLAndersAA: could not extract parent function information.\n");
      return MayAlias;
    }
  } else {
    assert(!parentFunctionOfValue(ValB) || parentFunctionOfValue(ValB) == Fn);
  }

  assert(Fn != nullptr);
  auto &FunInfo = ensureCached(*Fn);

  // AliasMap lookup
  if (FunInfo->mayAlias(ValA, LocA.Size, ValB, LocB.Size))
    return MayAlias;
  return NoAlias;
}

AliasResult CFLAndersAAResult::alias(const MemoryLocation &LocA,
                                     const MemoryLocation &LocB,
                                     AAQueryInfo &AAQI) {
  if (LocA.Ptr == LocB.Ptr)
    return MustAlias;

  // Comparisons between global variables and other constants should be
  // handled by BasicAA.
  // CFLAndersAA may report NoAlias when comparing a GlobalValue and
  // ConstantExpr, but every query needs to have at least one Value tied to a
  // Function, and neither GlobalValues nor ConstantExprs are.
  if (isa<Constant>(LocA.Ptr) && isa<Constant>(LocB.Ptr))
    return AAResultBase::alias(LocA, LocB, AAQI);

  AliasResult QueryResult = query(LocA, LocB);
  if (QueryResult == MayAlias)
    return AAResultBase::alias(LocA, LocB, AAQI);

  return QueryResult;
}

AnalysisKey CFLAndersAA::Key;

CFLAndersAAResult CFLAndersAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto GetTLI = [&AM](Function &F) -> TargetLibraryInfo & {
    return AM.getResult<TargetLibraryAnalysis>(F);
  };
  return CFLAndersAAResult(GetTLI);
}

char CFLAndersAAWrapperPass::ID = 0;
INITIALIZE_PASS(CFLAndersAAWrapperPass, "cfl-anders-aa",
                "Inclusion-Based CFL Alias Analysis", false, true)

ImmutablePass *llvm::createCFLAndersAAWrapperPass() {
  return new CFLAndersAAWrapperPass();
}

CFLAndersAAWrapperPass::CFLAndersAAWrapperPass() : ImmutablePass(ID) {
  initializeCFLAndersAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

void CFLAndersAAWrapperPass::initializePass() {
  auto GetTLI = [this](Function &F) -> TargetLibraryInfo & {
    return this->getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  };
  Result.reset(new CFLAndersAAResult(GetTLI));
}

void CFLAndersAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
}