1 //===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7 //===----------------------------------------------------------------------===//
10 /// This file implements LLVM's new Global Value Numbering pass.
11 /// GVN partitions values computed by a function into congruence classes.
12 /// Values ending up in the same congruence class are guaranteed to be the same
13 /// for every execution of the program. In that respect, congruency is a
14 /// compile-time approximation of equivalence of values at runtime.
15 /// The algorithm implemented here uses a sparse formulation and it's based
16 /// on the ideas described in the paper:
17 /// "A Sparse Algorithm for Predicated Global Value Numbering" from
20 /// A brief overview of the algorithm: The algorithm is essentially the same as
21 /// the standard RPO value numbering algorithm (a good reference is the paper
22 /// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
23 /// The RPO algorithm proceeds, on every iteration, to process every reachable
24 /// block and every instruction in that block. This is because the standard RPO
25 /// algorithm does not track what things have the same value number, it only
26 /// tracks what the value number of a given operation is (the mapping is
27 /// operation -> value number). Thus, when a value number of an operation
28 /// changes, it must reprocess everything to ensure all uses of a value number
29 /// get updated properly. In constrast, the sparse algorithm we use *also*
30 /// tracks what operations have a given value number (IE it also tracks the
31 /// reverse mapping from value number -> operations with that value number), so
32 /// that it only needs to reprocess the instructions that are affected when
33 /// something's value number changes. The vast majority of complexity and code
34 /// in this file is devoted to tracking what value numbers could change for what
35 /// instructions when various things happen. The rest of the algorithm is
36 /// devoted to performing symbolic evaluation, forward propagation, and
37 /// simplification of operations based on the value numbers deduced so far
39 /// In order to make the GVN mostly-complete, we use a technique derived from
40 /// "Detection of Redundant Expressions: A Complete and Polynomial-time
41 /// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
42 /// based GVN algorithms is related to their inability to detect equivalence
43 /// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
44 /// We resolve this issue by generating the equivalent "phi of ops" form for
45 /// each op of phis we see, in a way that only takes polynomial time to resolve.
47 /// We also do not perform elimination by using any published algorithm. All
48 /// published algorithms are O(Instructions). Instead, we use a technique that
49 /// is O(number of operations with the same value number), enabling us to skip
50 /// trying to eliminate things that have unique value numbers.
52 //===----------------------------------------------------------------------===//
54 #include "llvm/Transforms/Scalar/NewGVN.h"
55 #include "llvm/ADT/ArrayRef.h"
56 #include "llvm/ADT/BitVector.h"
57 #include "llvm/ADT/DenseMap.h"
58 #include "llvm/ADT/DenseMapInfo.h"
59 #include "llvm/ADT/DenseSet.h"
60 #include "llvm/ADT/DepthFirstIterator.h"
61 #include "llvm/ADT/GraphTraits.h"
62 #include "llvm/ADT/Hashing.h"
63 #include "llvm/ADT/PointerIntPair.h"
64 #include "llvm/ADT/PostOrderIterator.h"
65 #include "llvm/ADT/SetOperations.h"
66 #include "llvm/ADT/SmallPtrSet.h"
67 #include "llvm/ADT/SmallVector.h"
68 #include "llvm/ADT/SparseBitVector.h"
69 #include "llvm/ADT/Statistic.h"
70 #include "llvm/ADT/iterator_range.h"
71 #include "llvm/Analysis/AliasAnalysis.h"
72 #include "llvm/Analysis/AssumptionCache.h"
73 #include "llvm/Analysis/CFGPrinter.h"
74 #include "llvm/Analysis/ConstantFolding.h"
75 #include "llvm/Analysis/GlobalsModRef.h"
76 #include "llvm/Analysis/InstructionSimplify.h"
77 #include "llvm/Analysis/MemoryBuiltins.h"
78 #include "llvm/Analysis/MemorySSA.h"
79 #include "llvm/Analysis/TargetLibraryInfo.h"
80 #include "llvm/Analysis/ValueTracking.h"
81 #include "llvm/IR/Argument.h"
82 #include "llvm/IR/BasicBlock.h"
83 #include "llvm/IR/Constant.h"
84 #include "llvm/IR/Constants.h"
85 #include "llvm/IR/Dominators.h"
86 #include "llvm/IR/Function.h"
87 #include "llvm/IR/InstrTypes.h"
88 #include "llvm/IR/Instruction.h"
89 #include "llvm/IR/Instructions.h"
90 #include "llvm/IR/IntrinsicInst.h"
91 #include "llvm/IR/PatternMatch.h"
92 #include "llvm/IR/Type.h"
93 #include "llvm/IR/Use.h"
94 #include "llvm/IR/User.h"
95 #include "llvm/IR/Value.h"
96 #include "llvm/Support/Allocator.h"
97 #include "llvm/Support/ArrayRecycler.h"
98 #include "llvm/Support/Casting.h"
99 #include "llvm/Support/CommandLine.h"
100 #include "llvm/Support/Debug.h"
101 #include "llvm/Support/DebugCounter.h"
102 #include "llvm/Support/ErrorHandling.h"
103 #include "llvm/Support/PointerLikeTypeTraits.h"
104 #include "llvm/Support/raw_ostream.h"
105 #include "llvm/Transforms/Scalar/GVNExpression.h"
106 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
107 #include "llvm/Transforms/Utils/Local.h"
108 #include "llvm/Transforms/Utils/PredicateInfo.h"
109 #include "llvm/Transforms/Utils/VNCoercion.h"
122 using namespace llvm
;
123 using namespace llvm::GVNExpression
;
124 using namespace llvm::VNCoercion
;
125 using namespace llvm::PatternMatch
;
127 #define DEBUG_TYPE "newgvn"
129 STATISTIC(NumGVNInstrDeleted
, "Number of instructions deleted");
130 STATISTIC(NumGVNBlocksDeleted
, "Number of blocks deleted");
131 STATISTIC(NumGVNOpsSimplified
, "Number of Expressions simplified");
132 STATISTIC(NumGVNPhisAllSame
, "Number of PHIs whos arguments are all the same");
133 STATISTIC(NumGVNMaxIterations
,
134 "Maximum Number of iterations it took to converge GVN");
135 STATISTIC(NumGVNLeaderChanges
, "Number of leader changes");
136 STATISTIC(NumGVNSortedLeaderChanges
, "Number of sorted leader changes");
137 STATISTIC(NumGVNAvoidedSortedLeaderChanges
,
138 "Number of avoided sorted leader changes");
139 STATISTIC(NumGVNDeadStores
, "Number of redundant/dead stores eliminated");
140 STATISTIC(NumGVNPHIOfOpsCreated
, "Number of PHI of ops created");
141 STATISTIC(NumGVNPHIOfOpsEliminations
,
142 "Number of things eliminated using PHI of ops");
143 DEBUG_COUNTER(VNCounter
, "newgvn-vn",
144 "Controls which instructions are value numbered");
145 DEBUG_COUNTER(PHIOfOpsCounter
, "newgvn-phi",
146 "Controls which instructions we create phi of ops for");
147 // Currently store defining access refinement is too slow due to basicaa being
148 // egregiously slow. This flag lets us keep it working while we work on this
// -enable-store-refinement: defaults to off because, per the note above,
// refining store defining accesses is currently too slow (BasicAA cost).
static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
                                           cl::init(false), cl::Hidden);
153 /// Currently, the generation "phi of ops" can result in correctness issues.
154 static cl::opt
<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
157 //===----------------------------------------------------------------------===//
159 //===----------------------------------------------------------------------===//
163 namespace GVNExpression
{
// Out-of-line defaulted destructors for the GVNExpression class hierarchy.
// NOTE(review): defining these in the .cpp rather than implicitly in the
// header presumably anchors each class's vtable to this translation unit —
// confirm against GVNExpression.h.
Expression::~Expression() = default;
BasicExpression::~BasicExpression() = default;
CallExpression::~CallExpression() = default;
LoadExpression::~LoadExpression() = default;
StoreExpression::~StoreExpression() = default;
AggregateValueExpression::~AggregateValueExpression() = default;
PHIExpression::~PHIExpression() = default;
173 } // end namespace GVNExpression
174 } // end namespace llvm
178 // Tarjan's SCC finding algorithm with Nuutila's improvements
179 // SCCIterator is actually fairly complex for the simple thing we want.
180 // It also wants to hand us SCC's that are unrelated to the phi node we ask
181 // about, and have us process them there or risk redoing work.
182 // Graph traits over a filter iterator also doesn't work that well here.
183 // This SCC finder is specialized to walk use-def chains, and only follows
185 // not generic values (arguments, etc).
187 TarjanSCC() : Components(1) {}
189 void Start(const Instruction
*Start
) {
190 if (Root
.lookup(Start
) == 0)
194 const SmallPtrSetImpl
<const Value
*> &getComponentFor(const Value
*V
) const {
195 unsigned ComponentID
= ValueToComponent
.lookup(V
);
197 assert(ComponentID
> 0 &&
198 "Asking for a component for a value we never processed");
199 return Components
[ComponentID
];
203 void FindSCC(const Instruction
*I
) {
205 // Store the DFS Number we had before it possibly gets incremented.
206 unsigned int OurDFS
= DFSNum
;
207 for (const auto &Op
: I
->operands()) {
208 if (auto *InstOp
= dyn_cast
<Instruction
>(Op
)) {
209 if (Root
.lookup(Op
) == 0)
211 if (!InComponent
.count(Op
))
212 Root
[I
] = std::min(Root
.lookup(I
), Root
.lookup(Op
));
215 // See if we really were the root of a component, by seeing if we still have
216 // our DFSNumber. If we do, we are the root of the component, and we have
217 // completed a component. If we do not, we are not the root of a component,
218 // and belong on the component stack.
219 if (Root
.lookup(I
) == OurDFS
) {
220 unsigned ComponentID
= Components
.size();
221 Components
.resize(Components
.size() + 1);
222 auto &Component
= Components
.back();
224 LLVM_DEBUG(dbgs() << "Component root is " << *I
<< "\n");
225 InComponent
.insert(I
);
226 ValueToComponent
[I
] = ComponentID
;
227 // Pop a component off the stack and label it.
228 while (!Stack
.empty() && Root
.lookup(Stack
.back()) >= OurDFS
) {
229 auto *Member
= Stack
.back();
230 LLVM_DEBUG(dbgs() << "Component member is " << *Member
<< "\n");
231 Component
.insert(Member
);
232 InComponent
.insert(Member
);
233 ValueToComponent
[Member
] = ComponentID
;
237 // Part of a component, push to stack
242 unsigned int DFSNum
= 1;
243 SmallPtrSet
<const Value
*, 8> InComponent
;
244 DenseMap
<const Value
*, unsigned int> Root
;
245 SmallVector
<const Value
*, 8> Stack
;
247 // Store the components as vector of ptr sets, because we need the topo order
248 // of SCC's, but not individual member order
249 SmallVector
<SmallPtrSet
<const Value
*, 8>, 8> Components
;
251 DenseMap
<const Value
*, unsigned> ValueToComponent
;
254 // Congruence classes represent the set of expressions/instructions
255 // that are all the same *during some scope in the function*.
256 // That is, because of the way we perform equality propagation, and
257 // because of memory value numbering, it is not correct to assume
258 // you can willy-nilly replace any member with any other at any
259 // point in the function.
261 // For any Value in the Member set, it is valid to replace any dominated member
264 // Every congruence class has a leader, and the leader is used to symbolize
265 // instructions in a canonical way (IE every operand of an instruction that is a
266 // member of the same congruence class will always be replaced with leader
267 // during symbolization). To simplify symbolization, we keep the leader as a
268 // constant if class can be proved to be a constant value. Otherwise, the
269 // leader is the member of the value set with the smallest DFS number. Each
270 // congruence class also has a defining expression, though the expression may be
271 // null. If it exists, it can be used for forward propagation and reassociation
274 // For memory, we also track a representative MemoryAccess, and a set of memory
275 // members for MemoryPhis (which have no real instructions). Note that for
276 // memory, it seems tempting to try to split the memory members into a
277 // MemoryCongruenceClass or something. Unfortunately, this does not work
278 // easily. The value numbering of a given memory expression depends on the
279 // leader of the memory congruence class, and the leader of memory congruence
280 // class depends on the value numbering of a given memory expression. This
281 // leads to wasted propagation, and in some cases, missed optimization. For
282 // example: If we had value numbered two stores together before, but now do not,
283 // we move them to a new value congruence class. This in turn will move at one
284 // of the memorydefs to a new memory congruence class. Which in turn, affects
285 // the value numbering of the stores we just value numbered (because the memory
286 // congruence class is part of the value number). So while theoretically
287 // possible to split them up, it turns out to be *incredibly* complicated to get
288 // it to work right, because of the interdependency. While structurally
289 // slightly messier, it is algorithmically much simpler and faster to do what we
290 // do here, and track them both at once in the same class.
291 // Note: The default iterators for this class iterate over values
292 class CongruenceClass
{
294 using MemberType
= Value
;
295 using MemberSet
= SmallPtrSet
<MemberType
*, 4>;
296 using MemoryMemberType
= MemoryPhi
;
297 using MemoryMemberSet
= SmallPtrSet
<const MemoryMemberType
*, 2>;
299 explicit CongruenceClass(unsigned ID
) : ID(ID
) {}
300 CongruenceClass(unsigned ID
, std::pair
<Value
*, unsigned int> Leader
,
302 : ID(ID
), RepLeader(Leader
), DefiningExpr(E
) {}
// Unique, stable identifier of this congruence class.
unsigned getID() const { return ID; }
306 // True if this class has no members left. This is mainly used for assertion
307 // purposes, and for skipping empty classes.
308 bool isDead() const {
309 // If it's both dead from a value perspective, and dead from a memory
310 // perspective, it's really dead.
311 return empty() && memory_empty();
// Current leader: the value members of this class are symbolized to.
// RepLeader.first is the value; .second is its RPO number (leader has the
// lowest).
Value *getLeader() const { return RepLeader.first; }
316 void setLeader(std::pair
<Value
*, unsigned int> Leader
) {
319 const std::pair
<Value
*, unsigned int> &getNextLeader() const {
// Forget the cached runner-up leader; {nullptr, ~0} encodes "none known".
void resetNextLeader() { NextLeader = {nullptr, ~0}; }
323 bool addPossibleLeader(std::pair
<Value
*, unsigned int> LeaderPair
) {
324 if (LeaderPair
.second
< RepLeader
.second
) {
325 NextLeader
= RepLeader
;
326 RepLeader
= LeaderPair
;
328 } else if (LeaderPair
.second
< NextLeader
.second
) {
329 NextLeader
= LeaderPair
;
// If this class is represented by a store, the value of that store; null
// otherwise.
Value *getStoredValue() const { return RepStoredValue; }
void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
// Leading MemoryAccess when this class contains MemoryDefs or MemoryPhis.
const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }
339 // Forward propagation info
340 const Expression
*getDefiningExpr() const { return DefiningExpr
; }
// Value-member container interface (backed by a SmallPtrSet, so iteration
// order is unspecified).
bool empty() const { return Members.empty(); }
unsigned size() const { return Members.size(); }
MemberSet::const_iterator begin() const { return Members.begin(); }
MemberSet::const_iterator end() const { return Members.end(); }
void insert(MemberType *M) { Members.insert(M); }
void erase(MemberType *M) { Members.erase(M); }
void swap(MemberSet &Other) { Members.swap(Other); }
352 bool memory_empty() const { return MemoryMembers
.empty(); }
353 unsigned memory_size() const { return MemoryMembers
.size(); }
354 MemoryMemberSet::const_iterator
memory_begin() const {
355 return MemoryMembers
.begin();
357 MemoryMemberSet::const_iterator
memory_end() const {
358 return MemoryMembers
.end();
360 iterator_range
<MemoryMemberSet::const_iterator
> memory() const {
361 return make_range(memory_begin(), memory_end());
364 void memory_insert(const MemoryMemberType
*M
) { MemoryMembers
.insert(M
); }
365 void memory_erase(const MemoryMemberType
*M
) { MemoryMembers
.erase(M
); }
// Number of stores currently in this congruence class; used to detect store
// equivalence changes.
unsigned getStoreCount() const { return StoreCount; }
void incStoreCount() { ++StoreCount; }
370 void decStoreCount() {
371 assert(StoreCount
!= 0 && "Store count went negative");
// True if this class defines no memory state: it contains no stores and no
// MemoryPhi members.
bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }
378 // Return true if two congruence classes are equivalent to each other. This
379 // means that every field but the ID number and the dead field are equivalent.
380 bool isEquivalentTo(const CongruenceClass
*Other
) const {
386 if (std::tie(StoreCount
, RepLeader
, RepStoredValue
, RepMemoryAccess
) !=
387 std::tie(Other
->StoreCount
, Other
->RepLeader
, Other
->RepStoredValue
,
388 Other
->RepMemoryAccess
))
390 if (DefiningExpr
!= Other
->DefiningExpr
)
391 if (!DefiningExpr
|| !Other
->DefiningExpr
||
392 *DefiningExpr
!= *Other
->DefiningExpr
)
395 if (Members
.size() != Other
->Members
.size())
398 return llvm::set_is_subset(Members
, Other
->Members
);
404 // Representative leader and its corresponding RPO number.
405 // The leader must have the lowest RPO number.
406 std::pair
<Value
*, unsigned int> RepLeader
= {nullptr, ~0U};
408 // The most dominating leader after our current leader (given by the RPO
409 // number), because the member set is not sorted and is expensive to keep
410 // sorted all the time.
411 std::pair
<Value
*, unsigned int> NextLeader
= {nullptr, ~0U};
413 // If this is represented by a store, the value of the store.
414 Value
*RepStoredValue
= nullptr;
416 // If this class contains MemoryDefs or MemoryPhis, this is the leading memory
418 const MemoryAccess
*RepMemoryAccess
= nullptr;
420 // Defining Expression.
421 const Expression
*DefiningExpr
= nullptr;
423 // Actual members of this class.
426 // This is the set of MemoryPhis that exist in the class. MemoryDefs and
427 // MemoryUses have real instructions representing them, so we only need to
428 // track MemoryPhis here.
429 MemoryMemberSet MemoryMembers
;
431 // Number of stores in this congruence class.
432 // This is used so we can detect store equivalence changes properly.
436 } // end anonymous namespace
440 struct ExactEqualsExpression
{
443 explicit ExactEqualsExpression(const Expression
&E
) : E(E
) {}
445 hash_code
getComputedHash() const { return E
.getComputedHash(); }
447 bool operator==(const Expression
&Other
) const {
448 return E
.exactlyEquals(Other
);
452 template <> struct DenseMapInfo
<const Expression
*> {
453 static const Expression
*getEmptyKey() {
454 auto Val
= static_cast<uintptr_t>(-1);
455 Val
<<= PointerLikeTypeTraits
<const Expression
*>::NumLowBitsAvailable
;
456 return reinterpret_cast<const Expression
*>(Val
);
459 static const Expression
*getTombstoneKey() {
460 auto Val
= static_cast<uintptr_t>(~1U);
461 Val
<<= PointerLikeTypeTraits
<const Expression
*>::NumLowBitsAvailable
;
462 return reinterpret_cast<const Expression
*>(Val
);
465 static unsigned getHashValue(const Expression
*E
) {
466 return E
->getComputedHash();
469 static unsigned getHashValue(const ExactEqualsExpression
&E
) {
470 return E
.getComputedHash();
473 static bool isEqual(const ExactEqualsExpression
&LHS
, const Expression
*RHS
) {
474 if (RHS
== getTombstoneKey() || RHS
== getEmptyKey())
479 static bool isEqual(const Expression
*LHS
, const Expression
*RHS
) {
482 if (LHS
== getTombstoneKey() || RHS
== getTombstoneKey() ||
483 LHS
== getEmptyKey() || RHS
== getEmptyKey())
485 // Compare hashes before equality. This is *not* what the hashtable does,
486 // since it is computing it modulo the number of buckets, whereas we are
487 // using the full hash keyspace. Since the hashes are precomputed, this
488 // check is *much* faster than equality.
489 if (LHS
->getComputedHash() != RHS
->getComputedHash())
495 } // end namespace llvm
501 DominatorTree
*DT
= nullptr;
502 const TargetLibraryInfo
*TLI
= nullptr;
503 AliasAnalysis
*AA
= nullptr;
504 MemorySSA
*MSSA
= nullptr;
505 MemorySSAWalker
*MSSAWalker
= nullptr;
506 AssumptionCache
*AC
= nullptr;
507 const DataLayout
&DL
;
508 std::unique_ptr
<PredicateInfo
> PredInfo
;
510 // These are the only two things the create* functions should have
511 // side-effects on due to allocating memory.
512 mutable BumpPtrAllocator ExpressionAllocator
;
513 mutable ArrayRecycler
<Value
*> ArgRecycler
;
514 mutable TarjanSCC SCCFinder
;
515 const SimplifyQuery SQ
;
517 // Number of function arguments, used by ranking
518 unsigned int NumFuncArgs
= 0;
520 // RPOOrdering of basic blocks
521 DenseMap
<const DomTreeNode
*, unsigned> RPOOrdering
;
523 // Congruence class info.
525 // This class is called INITIAL in the paper. It is the class everything
526 // starts out in, and represents any value. Being an optimistic analysis,
527 // anything in the TOP class has the value TOP, which is indeterminate and
528 // equivalent to everything.
529 CongruenceClass
*TOPClass
= nullptr;
530 std::vector
<CongruenceClass
*> CongruenceClasses
;
531 unsigned NextCongruenceNum
= 0;
534 DenseMap
<Value
*, CongruenceClass
*> ValueToClass
;
535 DenseMap
<Value
*, const Expression
*> ValueToExpression
;
537 // Value PHI handling, used to make equivalence between phi(op, op) and
539 // These mappings just store various data that would normally be part of the
541 SmallPtrSet
<const Instruction
*, 8> PHINodeUses
;
543 // The cached results, in general, are only valid for the specific block where
544 // they were computed. The unsigned part of the key is a unique block
546 DenseMap
<std::pair
<const Value
*, unsigned>, bool> OpSafeForPHIOfOps
;
549 // Map a temporary instruction we created to a parent block.
550 DenseMap
<const Value
*, BasicBlock
*> TempToBlock
;
552 // Map between the already in-program instructions and the temporary phis we
553 // created that they are known equivalent to.
554 DenseMap
<const Value
*, PHINode
*> RealToTemp
;
556 // In order to know when we should re-process instructions that have
557 // phi-of-ops, we track the set of expressions that they needed as
558 // leaders. When we discover new leaders for those expressions, we process the
559 // associated phi-of-op instructions again in case they have changed. The
560 // other way they may change is if they had leaders, and those leaders
561 // disappear. However, at the point they have leaders, there are uses of the
562 // relevant operands in the created phi node, and so they will get reprocessed
563 // through the normal user marking we perform.
564 mutable DenseMap
<const Value
*, SmallPtrSet
<Value
*, 2>> AdditionalUsers
;
565 DenseMap
<const Expression
*, SmallPtrSet
<Instruction
*, 2>>
566 ExpressionToPhiOfOps
;
568 // Map from temporary operation to MemoryAccess.
569 DenseMap
<const Instruction
*, MemoryUseOrDef
*> TempToMemory
;
571 // Set of all temporary instructions we created.
572 // Note: This will include instructions that were just created during value
573 // numbering. The way to test if something is using them is to check
575 DenseSet
<Instruction
*> AllTempInstructions
;
577 // This is the set of instructions to revisit on a reachability change. At
578 // the end of the main iteration loop it will contain at least all the phi of
579 // ops instructions that will be changed to phis, as well as regular phis.
580 // During the iteration loop, it may contain other things, such as phi of ops
581 // instructions that used edge reachability to reach a result, and so need to
582 // be revisited when the edge changes, independent of whether the phi they
583 // depended on changes.
584 DenseMap
<BasicBlock
*, SparseBitVector
<>> RevisitOnReachabilityChange
;
586 // Mapping from predicate info we used to the instructions we used it with.
587 // In order to correctly ensure propagation, we must keep track of what
588 // comparisons we used, so that when the values of the comparisons change, we
589 // propagate the information to the places we used the comparison.
590 mutable DenseMap
<const Value
*, SmallPtrSet
<Instruction
*, 2>>
593 // the same reasoning as PredicateToUsers. When we skip MemoryAccesses for
594 // stores, we no longer can rely solely on the def-use chains of MemorySSA.
595 mutable DenseMap
<const MemoryAccess
*, SmallPtrSet
<MemoryAccess
*, 2>>
598 // A table storing which memorydefs/phis represent a memory state provably
599 // equivalent to another memory state.
600 // We could use the congruence class machinery, but the MemoryAccess's are
601 // abstract memory states, so they can only ever be equivalent to each other,
602 // and not to constants, etc.
603 DenseMap
<const MemoryAccess
*, CongruenceClass
*> MemoryAccessToClass
;
605 // We could, if we wanted, build MemoryPhiExpressions and
606 // MemoryVariableExpressions, etc, and value number them the same way we value
607 // number phi expressions. For the moment, this seems like overkill. They
608 // can only exist in one of three states: they can be TOP (equal to
609 // everything), Equivalent to something else, or unique. Because we do not
610 // create expressions for them, we need to simulate leader change not just
611 // when they change class, but when they change state. Note: We can do the
612 // same thing for phis, and avoid having phi expressions if we wanted, We
613 // should eventually unify in one direction or the other, so this is a little
614 // bit of an experiment in which turns out easier to maintain.
615 enum MemoryPhiState
{ MPS_Invalid
, MPS_TOP
, MPS_Equivalent
, MPS_Unique
};
616 DenseMap
<const MemoryPhi
*, MemoryPhiState
> MemoryPhiState
;
618 enum InstCycleState
{ ICS_Unknown
, ICS_CycleFree
, ICS_Cycle
};
619 mutable DenseMap
<const Instruction
*, InstCycleState
> InstCycleState
;
621 // Expression to class mapping.
622 using ExpressionClassMap
= DenseMap
<const Expression
*, CongruenceClass
*>;
623 ExpressionClassMap ExpressionToClass
;
625 // We have a single expression that represents currently DeadExpressions.
626 // For dead expressions we can prove will stay dead, we mark them with
627 // DFS number zero. However, it's possible in the case of phi nodes
628 // for us to assume/prove all arguments are dead during fixpointing.
629 // We use DeadExpression for that case.
630 DeadExpression
*SingletonDeadExpression
= nullptr;
632 // Which values have changed as a result of leader changes.
633 SmallPtrSet
<Value
*, 8> LeaderChanges
;
635 // Reachability info.
636 using BlockEdge
= BasicBlockEdge
;
637 DenseSet
<BlockEdge
> ReachableEdges
;
638 SmallPtrSet
<const BasicBlock
*, 8> ReachableBlocks
;
640 // This is a bitvector because, on larger functions, we may have
641 // thousands of touched instructions at once (entire blocks,
642 // instructions with hundreds of uses, etc). Even with optimization
643 // for when we mark whole blocks as touched, when this was a
644 // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
645 // the time in GVN just managing this list. The bitvector, on the
646 // other hand, efficiently supports test/set/clear of both
647 // individual and ranges, as well as "find next element" This
648 // enables us to use it as a worklist with essentially 0 cost.
649 BitVector TouchedInstructions
;
651 DenseMap
<const BasicBlock
*, std::pair
<unsigned, unsigned>> BlockInstRange
;
652 mutable DenseMap
<const IntrinsicInst
*, const Value
*> IntrinsicInstPred
;
655 // Debugging for how many times each block and instruction got processed.
656 DenseMap
<const Value
*, unsigned> ProcessedCount
;
660 // This contains a mapping from Instructions to DFS numbers.
661 // The numbering starts at 1. An instruction with DFS number zero
662 // means that the instruction is dead.
663 DenseMap
<const Value
*, unsigned> InstrDFS
;
665 // This contains the mapping DFS numbers to instructions.
666 SmallVector
<Value
*, 32> DFSToInstr
;
669 SmallPtrSet
<Instruction
*, 8> InstructionsToErase
;
/// Construct a NewGVN run over \p F, caching the analysis results it
/// consumes and eagerly building PredicateInfo for the function.
NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
       TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
       const DataLayout &DL)
    : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), AC(AC), DL(DL),
      PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)),
      // Simplification is run with no context instruction, without
      // instruction-specific info, and with undef exploitation explicitly
      // disabled (CanUseUndef=false).
      SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false,
         /*CanUseUndef=*/false) {}
683 /// Helper struct return a Expression with an optional extra dependency.
685 const Expression
*Expr
;
687 const PredicateBase
*PredDep
;
689 ExprResult(const Expression
*Expr
, Value
*ExtraDep
= nullptr,
690 const PredicateBase
*PredDep
= nullptr)
691 : Expr(Expr
), ExtraDep(ExtraDep
), PredDep(PredDep
) {}
692 ExprResult(const ExprResult
&) = delete;
693 ExprResult(ExprResult
&&Other
)
694 : Expr(Other
.Expr
), ExtraDep(Other
.ExtraDep
), PredDep(Other
.PredDep
) {
695 Other
.Expr
= nullptr;
696 Other
.ExtraDep
= nullptr;
697 Other
.PredDep
= nullptr;
699 ExprResult
&operator=(const ExprResult
&Other
) = delete;
700 ExprResult
&operator=(ExprResult
&&Other
) = delete;
702 ~ExprResult() { assert(!ExtraDep
&& "unhandled ExtraDep"); }
704 operator bool() const { return Expr
; }
706 static ExprResult
none() { return {nullptr, nullptr, nullptr}; }
707 static ExprResult
some(const Expression
*Expr
, Value
*ExtraDep
= nullptr) {
708 return {Expr
, ExtraDep
, nullptr};
710 static ExprResult
some(const Expression
*Expr
,
711 const PredicateBase
*PredDep
) {
712 return {Expr
, nullptr, PredDep
};
714 static ExprResult
some(const Expression
*Expr
, Value
*ExtraDep
,
715 const PredicateBase
*PredDep
) {
716 return {Expr
, ExtraDep
, PredDep
};
720 // Expression handling.
721 ExprResult
createExpression(Instruction
*) const;
722 const Expression
*createBinaryExpression(unsigned, Type
*, Value
*, Value
*,
723 Instruction
*) const;
725 // Our canonical form for phi arguments is a pair of incoming value, incoming
727 using ValPair
= std::pair
<Value
*, BasicBlock
*>;
729 PHIExpression
*createPHIExpression(ArrayRef
<ValPair
>, const Instruction
*,
730 BasicBlock
*, bool &HasBackEdge
,
731 bool &OriginalOpsConstant
) const;
732 const DeadExpression
*createDeadExpression() const;
733 const VariableExpression
*createVariableExpression(Value
*) const;
734 const ConstantExpression
*createConstantExpression(Constant
*) const;
735 const Expression
*createVariableOrConstant(Value
*V
) const;
736 const UnknownExpression
*createUnknownExpression(Instruction
*) const;
737 const StoreExpression
*createStoreExpression(StoreInst
*,
738 const MemoryAccess
*) const;
739 LoadExpression
*createLoadExpression(Type
*, Value
*, LoadInst
*,
740 const MemoryAccess
*) const;
741 const CallExpression
*createCallExpression(CallInst
*,
742 const MemoryAccess
*) const;
743 const AggregateValueExpression
*
744 createAggregateValueExpression(Instruction
*) const;
745 bool setBasicExpressionInfo(Instruction
*, BasicExpression
*) const;
747 // Congruence class handling.
748 CongruenceClass
*createCongruenceClass(Value
*Leader
, const Expression
*E
) {
749 // Set RPO to 0 for values that are always available (constants and function
750 // args). These should always be made leader.
751 unsigned LeaderDFS
= 0;
753 // If Leader is not specified, either we have a memory class or the leader
754 // will be set later. Otherwise, if Leader is an Instruction, set LeaderDFS
755 // to its RPO number.
758 else if (auto *I
= dyn_cast
<Instruction
>(Leader
))
759 LeaderDFS
= InstrToDFSNum(I
);
761 new CongruenceClass(NextCongruenceNum
++, {Leader
, LeaderDFS
}, E
);
762 CongruenceClasses
.emplace_back(result
);
766 CongruenceClass
*createMemoryClass(MemoryAccess
*MA
) {
767 auto *CC
= createCongruenceClass(nullptr, nullptr);
768 CC
->setMemoryLeader(MA
);
772 CongruenceClass
*ensureLeaderOfMemoryClass(MemoryAccess
*MA
) {
773 auto *CC
= getMemoryClass(MA
);
774 if (CC
->getMemoryLeader() != MA
)
775 CC
= createMemoryClass(MA
);
779 CongruenceClass
*createSingletonCongruenceClass(Value
*Member
) {
780 CongruenceClass
*CClass
= createCongruenceClass(Member
, nullptr);
781 CClass
->insert(Member
);
782 ValueToClass
[Member
] = CClass
;
786 void initializeCongruenceClasses(Function
&F
);
787 const Expression
*makePossiblePHIOfOps(Instruction
*,
788 SmallPtrSetImpl
<Value
*> &);
789 Value
*findLeaderForInst(Instruction
*ValueOp
,
790 SmallPtrSetImpl
<Value
*> &Visited
,
791 MemoryAccess
*MemAccess
, Instruction
*OrigInst
,
793 bool OpIsSafeForPHIOfOps(Value
*Op
, const BasicBlock
*PHIBlock
,
794 SmallPtrSetImpl
<const Value
*> &);
795 void addPhiOfOps(PHINode
*Op
, BasicBlock
*BB
, Instruction
*ExistingValue
);
796 void removePhiOfOps(Instruction
*I
, PHINode
*PHITemp
);
798 // Value number an Instruction or MemoryPhi.
799 void valueNumberMemoryPhi(MemoryPhi
*);
800 void valueNumberInstruction(Instruction
*);
802 // Symbolic evaluation.
803 ExprResult
checkExprResults(Expression
*, Instruction
*, Value
*) const;
804 ExprResult
performSymbolicEvaluation(Instruction
*,
805 SmallPtrSetImpl
<Value
*> &) const;
806 const Expression
*performSymbolicLoadCoercion(Type
*, Value
*, LoadInst
*,
808 MemoryAccess
*) const;
809 const Expression
*performSymbolicLoadEvaluation(Instruction
*) const;
810 const Expression
*performSymbolicStoreEvaluation(Instruction
*) const;
811 ExprResult
performSymbolicCallEvaluation(Instruction
*) const;
812 void sortPHIOps(MutableArrayRef
<ValPair
> Ops
) const;
813 const Expression
*performSymbolicPHIEvaluation(ArrayRef
<ValPair
>,
815 BasicBlock
*PHIBlock
) const;
816 const Expression
*performSymbolicAggrValueEvaluation(Instruction
*) const;
817 ExprResult
performSymbolicCmpEvaluation(Instruction
*) const;
818 ExprResult
performSymbolicPredicateInfoEvaluation(IntrinsicInst
*) const;
820 // Congruence finding.
821 bool someEquivalentDominates(const Instruction
*, const Instruction
*) const;
822 Value
*lookupOperandLeader(Value
*) const;
823 CongruenceClass
*getClassForExpression(const Expression
*E
) const;
824 void performCongruenceFinding(Instruction
*, const Expression
*);
825 void moveValueToNewCongruenceClass(Instruction
*, const Expression
*,
826 CongruenceClass
*, CongruenceClass
*);
827 void moveMemoryToNewCongruenceClass(Instruction
*, MemoryAccess
*,
828 CongruenceClass
*, CongruenceClass
*);
829 Value
*getNextValueLeader(CongruenceClass
*) const;
830 const MemoryAccess
*getNextMemoryLeader(CongruenceClass
*) const;
831 bool setMemoryClass(const MemoryAccess
*From
, CongruenceClass
*To
);
832 CongruenceClass
*getMemoryClass(const MemoryAccess
*MA
) const;
833 const MemoryAccess
*lookupMemoryLeader(const MemoryAccess
*) const;
834 bool isMemoryAccessTOP(const MemoryAccess
*) const;
837 unsigned int getRank(const Value
*) const;
838 bool shouldSwapOperands(const Value
*, const Value
*) const;
839 bool shouldSwapOperandsForIntrinsic(const Value
*, const Value
*,
840 const IntrinsicInst
*I
) const;
842 // Reachability handling.
843 void updateReachableEdge(BasicBlock
*, BasicBlock
*);
844 void processOutgoingEdges(Instruction
*, BasicBlock
*);
845 Value
*findConditionEquivalence(Value
*) const;
849 void convertClassToDFSOrdered(const CongruenceClass
&,
850 SmallVectorImpl
<ValueDFS
> &,
851 DenseMap
<const Value
*, unsigned int> &,
852 SmallPtrSetImpl
<Instruction
*> &) const;
853 void convertClassToLoadsAndStores(const CongruenceClass
&,
854 SmallVectorImpl
<ValueDFS
> &) const;
856 bool eliminateInstructions(Function
&);
857 void replaceInstruction(Instruction
*, Value
*);
858 void markInstructionForDeletion(Instruction
*);
859 void deleteInstructionsInBlock(BasicBlock
*);
860 Value
*findPHIOfOpsLeader(const Expression
*, const Instruction
*,
861 const BasicBlock
*) const;
863 // Various instruction touch utilities
864 template <typename Map
, typename KeyType
>
865 void touchAndErase(Map
&, const KeyType
&);
866 void markUsersTouched(Value
*);
867 void markMemoryUsersTouched(const MemoryAccess
*);
868 void markMemoryDefTouched(const MemoryAccess
*);
869 void markPredicateUsersTouched(Instruction
*);
870 void markValueLeaderChangeTouched(CongruenceClass
*CC
);
871 void markMemoryLeaderChangeTouched(CongruenceClass
*CC
);
872 void markPhiOfOpsChanged(const Expression
*E
);
873 void addMemoryUsers(const MemoryAccess
*To
, MemoryAccess
*U
) const;
874 void addAdditionalUsers(Value
*To
, Value
*User
) const;
875 void addAdditionalUsers(ExprResult
&Res
, Instruction
*User
) const;
877 // Main loop of value numbering
878 void iterateTouchedInstructions();
881 void cleanupTables();
882 std::pair
<unsigned, unsigned> assignDFSNumbers(BasicBlock
*, unsigned);
883 void updateProcessedCount(const Value
*V
);
884 void verifyMemoryCongruency() const;
885 void verifyIterationSettled(Function
&F
);
886 void verifyStoreExpressions() const;
887 bool singleReachablePHIPath(SmallPtrSet
<const MemoryAccess
*, 8> &,
888 const MemoryAccess
*, const MemoryAccess
*) const;
889 BasicBlock
*getBlockForValue(Value
*V
) const;
890 void deleteExpression(const Expression
*E
) const;
891 MemoryUseOrDef
*getMemoryAccess(const Instruction
*) const;
892 MemoryPhi
*getMemoryAccess(const BasicBlock
*) const;
893 template <class T
, class Range
> T
*getMinDFSOfRange(const Range
&) const;
895 unsigned InstrToDFSNum(const Value
*V
) const {
896 assert(isa
<Instruction
>(V
) && "This should not be used for MemoryAccesses");
897 return InstrDFS
.lookup(V
);
900 unsigned InstrToDFSNum(const MemoryAccess
*MA
) const {
901 return MemoryToDFSNum(MA
);
904 Value
*InstrFromDFSNum(unsigned DFSNum
) { return DFSToInstr
[DFSNum
]; }
906 // Given a MemoryAccess, return the relevant instruction DFS number. Note:
907 // This deliberately takes a value so it can be used with Use's, which will
908 // auto-convert to Value's but not to MemoryAccess's.
909 unsigned MemoryToDFSNum(const Value
*MA
) const {
910 assert(isa
<MemoryAccess
>(MA
) &&
911 "This should not be used with instructions");
912 return isa
<MemoryUseOrDef
>(MA
)
913 ? InstrToDFSNum(cast
<MemoryUseOrDef
>(MA
)->getMemoryInst())
914 : InstrDFS
.lookup(MA
);
917 bool isCycleFree(const Instruction
*) const;
918 bool isBackedge(BasicBlock
*From
, BasicBlock
*To
) const;
920 // Debug counter info. When verifying, we have to reset the value numbering
921 // debug counter to the same state it started in to get the same results.
922 DebugCounter::CounterState StartingVNCounter
;
925 } // end anonymous namespace
927 template <typename T
>
928 static bool equalsLoadStoreHelper(const T
&LHS
, const Expression
&RHS
) {
929 if (!isa
<LoadExpression
>(RHS
) && !isa
<StoreExpression
>(RHS
))
931 return LHS
.MemoryExpression::equals(RHS
);
934 bool LoadExpression::equals(const Expression
&Other
) const {
935 return equalsLoadStoreHelper(*this, Other
);
938 bool StoreExpression::equals(const Expression
&Other
) const {
939 if (!equalsLoadStoreHelper(*this, Other
))
941 // Make sure that store vs store includes the value operand.
942 if (const auto *S
= dyn_cast
<StoreExpression
>(&Other
))
943 if (getStoredValue() != S
->getStoredValue())
948 bool CallExpression::equals(const Expression
&Other
) const {
949 if (!MemoryExpression::equals(Other
))
952 if (auto *RHS
= dyn_cast
<CallExpression
>(&Other
))
953 return Call
->getAttributes()
954 .intersectWith(Call
->getContext(), RHS
->Call
->getAttributes())
960 // Determine if the edge From->To is a backedge
961 bool NewGVN::isBackedge(BasicBlock
*From
, BasicBlock
*To
) const {
963 RPOOrdering
.lookup(DT
->getNode(From
)) >=
964 RPOOrdering
.lookup(DT
->getNode(To
));
968 static std::string
getBlockName(const BasicBlock
*B
) {
969 return DOTGraphTraits
<DOTFuncInfo
*>::getSimpleNodeLabel(B
, nullptr);
973 // Get a MemoryAccess for an instruction, fake or real.
974 MemoryUseOrDef
*NewGVN::getMemoryAccess(const Instruction
*I
) const {
975 auto *Result
= MSSA
->getMemoryAccess(I
);
976 return Result
? Result
: TempToMemory
.lookup(I
);
979 // Get a MemoryPhi for a basic block. These are all real.
980 MemoryPhi
*NewGVN::getMemoryAccess(const BasicBlock
*BB
) const {
981 return MSSA
->getMemoryAccess(BB
);
984 // Get the basic block from an instruction/memory value.
985 BasicBlock
*NewGVN::getBlockForValue(Value
*V
) const {
986 if (auto *I
= dyn_cast
<Instruction
>(V
)) {
987 auto *Parent
= I
->getParent();
990 Parent
= TempToBlock
.lookup(V
);
991 assert(Parent
&& "Every fake instruction should have a block");
995 auto *MP
= dyn_cast
<MemoryPhi
>(V
);
996 assert(MP
&& "Should have been an instruction or a MemoryPhi");
997 return MP
->getBlock();
1000 // Delete a definitely dead expression, so it can be reused by the expression
1001 // allocator. Some of these are not in creation functions, so we have to accept
1003 void NewGVN::deleteExpression(const Expression
*E
) const {
1004 assert(isa
<BasicExpression
>(E
));
1005 auto *BE
= cast
<BasicExpression
>(E
);
1006 const_cast<BasicExpression
*>(BE
)->deallocateOperands(ArgRecycler
);
1007 ExpressionAllocator
.Deallocate(E
);
1010 // If V is a predicateinfo copy, get the thing it is a copy of.
1011 static Value
*getCopyOf(const Value
*V
) {
1012 if (auto *II
= dyn_cast
<IntrinsicInst
>(V
))
1013 if (II
->getIntrinsicID() == Intrinsic::ssa_copy
)
1014 return II
->getOperand(0);
1018 // Return true if V is really PN, even accounting for predicateinfo copies.
1019 static bool isCopyOfPHI(const Value
*V
, const PHINode
*PN
) {
1020 return V
== PN
|| getCopyOf(V
) == PN
;
1023 static bool isCopyOfAPHI(const Value
*V
) {
1024 auto *CO
= getCopyOf(V
);
1025 return CO
&& isa
<PHINode
>(CO
);
1028 // Sort PHI Operands into a canonical order. What we use here is an RPO
1029 // order. The BlockInstRange numbers are generated in an RPO walk of the basic
1031 void NewGVN::sortPHIOps(MutableArrayRef
<ValPair
> Ops
) const {
1032 llvm::sort(Ops
, [&](const ValPair
&P1
, const ValPair
&P2
) {
1033 return BlockInstRange
.lookup(P1
.second
).first
<
1034 BlockInstRange
.lookup(P2
.second
).first
;
1038 // Return true if V is a value that will always be available (IE can
1039 // be placed anywhere) in the function. We don't do globals here
1040 // because they are often worse to put in place.
1041 static bool alwaysAvailable(Value
*V
) {
1042 return isa
<Constant
>(V
) || isa
<Argument
>(V
);
1045 // Create a PHIExpression from an array of {incoming edge, value} pairs. I is
1046 // the original instruction we are creating a PHIExpression for (but may not be
1047 // a phi node). We require, as an invariant, that all the PHIOperands in the
1048 // same block are sorted the same way. sortPHIOps will sort them into a
1050 PHIExpression
*NewGVN::createPHIExpression(ArrayRef
<ValPair
> PHIOperands
,
1051 const Instruction
*I
,
1052 BasicBlock
*PHIBlock
,
1054 bool &OriginalOpsConstant
) const {
1055 unsigned NumOps
= PHIOperands
.size();
1056 auto *E
= new (ExpressionAllocator
) PHIExpression(NumOps
, PHIBlock
);
1058 E
->allocateOperands(ArgRecycler
, ExpressionAllocator
);
1059 E
->setType(PHIOperands
.begin()->first
->getType());
1060 E
->setOpcode(Instruction::PHI
);
1062 // Filter out unreachable phi operands.
1063 auto Filtered
= make_filter_range(PHIOperands
, [&](const ValPair
&P
) {
1064 auto *BB
= P
.second
;
1065 if (auto *PHIOp
= dyn_cast
<PHINode
>(I
))
1066 if (isCopyOfPHI(P
.first
, PHIOp
))
1068 if (!ReachableEdges
.count({BB
, PHIBlock
}))
1070 // Things in TOPClass are equivalent to everything.
1071 if (ValueToClass
.lookup(P
.first
) == TOPClass
)
1073 OriginalOpsConstant
= OriginalOpsConstant
&& isa
<Constant
>(P
.first
);
1074 HasBackedge
= HasBackedge
|| isBackedge(BB
, PHIBlock
);
1075 return lookupOperandLeader(P
.first
) != I
;
1077 std::transform(Filtered
.begin(), Filtered
.end(), op_inserter(E
),
1078 [&](const ValPair
&P
) -> Value
* {
1079 return lookupOperandLeader(P
.first
);
1084 // Set basic expression info (Arguments, type, opcode) for Expression
1085 // E from Instruction I in block B.
1086 bool NewGVN::setBasicExpressionInfo(Instruction
*I
, BasicExpression
*E
) const {
1087 bool AllConstant
= true;
1088 if (auto *GEP
= dyn_cast
<GetElementPtrInst
>(I
))
1089 E
->setType(GEP
->getSourceElementType());
1091 E
->setType(I
->getType());
1092 E
->setOpcode(I
->getOpcode());
1093 E
->allocateOperands(ArgRecycler
, ExpressionAllocator
);
1095 // Transform the operand array into an operand leader array, and keep track of
1096 // whether all members are constant.
1097 std::transform(I
->op_begin(), I
->op_end(), op_inserter(E
), [&](Value
*O
) {
1098 auto Operand
= lookupOperandLeader(O
);
1099 AllConstant
= AllConstant
&& isa
<Constant
>(Operand
);
1106 const Expression
*NewGVN::createBinaryExpression(unsigned Opcode
, Type
*T
,
1107 Value
*Arg1
, Value
*Arg2
,
1108 Instruction
*I
) const {
1109 auto *E
= new (ExpressionAllocator
) BasicExpression(2);
1110 // TODO: we need to remove context instruction after Value Tracking
1111 // can run without context instruction
1112 const SimplifyQuery Q
= SQ
.getWithInstruction(I
);
1115 E
->setOpcode(Opcode
);
1116 E
->allocateOperands(ArgRecycler
, ExpressionAllocator
);
1117 if (Instruction::isCommutative(Opcode
)) {
1118 // Ensure that commutative instructions that only differ by a permutation
1119 // of their operands get the same value number by sorting the operand value
1120 // numbers. Since all commutative instructions have two operands it is more
1121 // efficient to sort by hand rather than using, say, std::sort.
1122 if (shouldSwapOperands(Arg1
, Arg2
))
1123 std::swap(Arg1
, Arg2
);
1125 E
->op_push_back(lookupOperandLeader(Arg1
));
1126 E
->op_push_back(lookupOperandLeader(Arg2
));
1128 Value
*V
= simplifyBinOp(Opcode
, E
->getOperand(0), E
->getOperand(1), Q
);
1129 if (auto Simplified
= checkExprResults(E
, I
, V
)) {
1130 addAdditionalUsers(Simplified
, I
);
1131 return Simplified
.Expr
;
1136 // Take a Value returned by simplification of Expression E/Instruction
1137 // I, and see if it resulted in a simpler expression. If so, return
1139 NewGVN::ExprResult
NewGVN::checkExprResults(Expression
*E
, Instruction
*I
,
1142 return ExprResult::none();
1144 if (auto *C
= dyn_cast
<Constant
>(V
)) {
1146 LLVM_DEBUG(dbgs() << "Simplified " << *I
<< " to "
1147 << " constant " << *C
<< "\n");
1148 NumGVNOpsSimplified
++;
1149 assert(isa
<BasicExpression
>(E
) &&
1150 "We should always have had a basic expression here");
1151 deleteExpression(E
);
1152 return ExprResult::some(createConstantExpression(C
));
1153 } else if (isa
<Argument
>(V
) || isa
<GlobalVariable
>(V
)) {
1155 LLVM_DEBUG(dbgs() << "Simplified " << *I
<< " to "
1156 << " variable " << *V
<< "\n");
1157 deleteExpression(E
);
1158 return ExprResult::some(createVariableExpression(V
));
1161 CongruenceClass
*CC
= ValueToClass
.lookup(V
);
1163 if (CC
->getLeader() && CC
->getLeader() != I
) {
1164 return ExprResult::some(createVariableOrConstant(CC
->getLeader()), V
);
1166 if (CC
->getDefiningExpr()) {
1168 LLVM_DEBUG(dbgs() << "Simplified " << *I
<< " to "
1169 << " expression " << *CC
->getDefiningExpr() << "\n");
1170 NumGVNOpsSimplified
++;
1171 deleteExpression(E
);
1172 return ExprResult::some(CC
->getDefiningExpr(), V
);
1176 return ExprResult::none();
1179 // Create a value expression from the instruction I, replacing operands with
1182 NewGVN::ExprResult
NewGVN::createExpression(Instruction
*I
) const {
1183 auto *E
= new (ExpressionAllocator
) BasicExpression(I
->getNumOperands());
1184 // TODO: we need to remove context instruction after Value Tracking
1185 // can run without context instruction
1186 const SimplifyQuery Q
= SQ
.getWithInstruction(I
);
1188 bool AllConstant
= setBasicExpressionInfo(I
, E
);
1190 if (I
->isCommutative()) {
1191 // Ensure that commutative instructions that only differ by a permutation
1192 // of their operands get the same value number by sorting the operand value
1193 // numbers. Since all commutative instructions have two operands it is more
1194 // efficient to sort by hand rather than using, say, std::sort.
1195 assert(I
->getNumOperands() == 2 && "Unsupported commutative instruction!");
1196 if (shouldSwapOperands(E
->getOperand(0), E
->getOperand(1)))
1197 E
->swapOperands(0, 1);
1199 // Perform simplification.
1200 if (auto *CI
= dyn_cast
<CmpInst
>(I
)) {
1201 // Sort the operand value numbers so x<y and y>x get the same value
1203 CmpInst::Predicate Predicate
= CI
->getPredicate();
1204 if (shouldSwapOperands(E
->getOperand(0), E
->getOperand(1))) {
1205 E
->swapOperands(0, 1);
1206 Predicate
= CmpInst::getSwappedPredicate(Predicate
);
1208 E
->setOpcode((CI
->getOpcode() << 8) | Predicate
);
1209 // TODO: 25% of our time is spent in simplifyCmpInst with pointer operands
1210 assert(I
->getOperand(0)->getType() == I
->getOperand(1)->getType() &&
1211 "Wrong types on cmp instruction");
1212 assert((E
->getOperand(0)->getType() == I
->getOperand(0)->getType() &&
1213 E
->getOperand(1)->getType() == I
->getOperand(1)->getType()));
1215 simplifyCmpInst(Predicate
, E
->getOperand(0), E
->getOperand(1), Q
);
1216 if (auto Simplified
= checkExprResults(E
, I
, V
))
1218 } else if (isa
<SelectInst
>(I
)) {
1219 if (isa
<Constant
>(E
->getOperand(0)) ||
1220 E
->getOperand(1) == E
->getOperand(2)) {
1221 assert(E
->getOperand(1)->getType() == I
->getOperand(1)->getType() &&
1222 E
->getOperand(2)->getType() == I
->getOperand(2)->getType());
1223 Value
*V
= simplifySelectInst(E
->getOperand(0), E
->getOperand(1),
1224 E
->getOperand(2), Q
);
1225 if (auto Simplified
= checkExprResults(E
, I
, V
))
1228 } else if (I
->isBinaryOp()) {
1230 simplifyBinOp(E
->getOpcode(), E
->getOperand(0), E
->getOperand(1), Q
);
1231 if (auto Simplified
= checkExprResults(E
, I
, V
))
1233 } else if (auto *CI
= dyn_cast
<CastInst
>(I
)) {
1235 simplifyCastInst(CI
->getOpcode(), E
->getOperand(0), CI
->getType(), Q
);
1236 if (auto Simplified
= checkExprResults(E
, I
, V
))
1238 } else if (auto *GEPI
= dyn_cast
<GetElementPtrInst
>(I
)) {
1239 Value
*V
= simplifyGEPInst(GEPI
->getSourceElementType(), *E
->op_begin(),
1240 ArrayRef(std::next(E
->op_begin()), E
->op_end()),
1241 GEPI
->getNoWrapFlags(), Q
);
1242 if (auto Simplified
= checkExprResults(E
, I
, V
))
1244 } else if (AllConstant
) {
1245 // We don't bother trying to simplify unless all of the operands
1247 // TODO: There are a lot of Simplify*'s we could call here, if we
1248 // wanted to. The original motivating case for this code was a
1249 // zext i1 false to i8, which we don't have an interface to
1250 // simplify (IE there is no SimplifyZExt).
1252 SmallVector
<Constant
*, 8> C
;
1253 for (Value
*Arg
: E
->operands())
1254 C
.emplace_back(cast
<Constant
>(Arg
));
1256 if (Value
*V
= ConstantFoldInstOperands(I
, C
, DL
, TLI
))
1257 if (auto Simplified
= checkExprResults(E
, I
, V
))
1260 return ExprResult::some(E
);
1263 const AggregateValueExpression
*
1264 NewGVN::createAggregateValueExpression(Instruction
*I
) const {
1265 if (auto *II
= dyn_cast
<InsertValueInst
>(I
)) {
1266 auto *E
= new (ExpressionAllocator
)
1267 AggregateValueExpression(I
->getNumOperands(), II
->getNumIndices());
1268 setBasicExpressionInfo(I
, E
);
1269 E
->allocateIntOperands(ExpressionAllocator
);
1270 std::copy(II
->idx_begin(), II
->idx_end(), int_op_inserter(E
));
1272 } else if (auto *EI
= dyn_cast
<ExtractValueInst
>(I
)) {
1273 auto *E
= new (ExpressionAllocator
)
1274 AggregateValueExpression(I
->getNumOperands(), EI
->getNumIndices());
1275 setBasicExpressionInfo(EI
, E
);
1276 E
->allocateIntOperands(ExpressionAllocator
);
1277 std::copy(EI
->idx_begin(), EI
->idx_end(), int_op_inserter(E
));
1280 llvm_unreachable("Unhandled type of aggregate value operation");
1283 const DeadExpression
*NewGVN::createDeadExpression() const {
1284 // DeadExpression has no arguments and all DeadExpression's are the same,
1285 // so we only need one of them.
1286 return SingletonDeadExpression
;
1289 const VariableExpression
*NewGVN::createVariableExpression(Value
*V
) const {
1290 auto *E
= new (ExpressionAllocator
) VariableExpression(V
);
1291 E
->setOpcode(V
->getValueID());
1295 const Expression
*NewGVN::createVariableOrConstant(Value
*V
) const {
1296 if (auto *C
= dyn_cast
<Constant
>(V
))
1297 return createConstantExpression(C
);
1298 return createVariableExpression(V
);
1301 const ConstantExpression
*NewGVN::createConstantExpression(Constant
*C
) const {
1302 auto *E
= new (ExpressionAllocator
) ConstantExpression(C
);
1303 E
->setOpcode(C
->getValueID());
1307 const UnknownExpression
*NewGVN::createUnknownExpression(Instruction
*I
) const {
1308 auto *E
= new (ExpressionAllocator
) UnknownExpression(I
);
1309 E
->setOpcode(I
->getOpcode());
1313 const CallExpression
*
1314 NewGVN::createCallExpression(CallInst
*CI
, const MemoryAccess
*MA
) const {
1315 // FIXME: Add operand bundles for calls.
1317 new (ExpressionAllocator
) CallExpression(CI
->getNumOperands(), CI
, MA
);
1318 setBasicExpressionInfo(CI
, E
);
1319 if (CI
->isCommutative()) {
1320 // Ensure that commutative intrinsics that only differ by a permutation
1321 // of their operands get the same value number by sorting the operand value
1323 assert(CI
->getNumOperands() >= 2 && "Unsupported commutative intrinsic!");
1324 if (shouldSwapOperands(E
->getOperand(0), E
->getOperand(1)))
1325 E
->swapOperands(0, 1);
1330 // Return true if some equivalent of instruction Inst dominates instruction U.
1331 bool NewGVN::someEquivalentDominates(const Instruction
*Inst
,
1332 const Instruction
*U
) const {
1333 auto *CC
= ValueToClass
.lookup(Inst
);
1334 // This must be an instruction because we are only called from phi nodes
1335 // in the case that the value it needs to check against is an instruction.
1337 // The most likely candidates for dominance are the leader and the next leader.
1338 // The leader or nextleader will dominate in all cases where there is an
1339 // equivalent that is higher up in the dom tree.
1340 // We can't *only* check them, however, because the
1341 // dominator tree could have an infinite number of non-dominating siblings
1342 // with instructions that are in the right congruence class.
1347 // Instruction U could be in H, with equivalents in every other sibling.
1348 // Depending on the rpo order picked, the leader could be the equivalent in
1349 // any of these siblings.
1352 if (alwaysAvailable(CC
->getLeader()))
1354 if (DT
->dominates(cast
<Instruction
>(CC
->getLeader()), U
))
1356 if (CC
->getNextLeader().first
&&
1357 DT
->dominates(cast
<Instruction
>(CC
->getNextLeader().first
), U
))
1359 return llvm::any_of(*CC
, [&](const Value
*Member
) {
1360 return Member
!= CC
->getLeader() &&
1361 DT
->dominates(cast
<Instruction
>(Member
), U
);
1365 // See if we have a congruence class and leader for this operand, and if so,
1366 // return it. Otherwise, return the operand itself.
1367 Value
*NewGVN::lookupOperandLeader(Value
*V
) const {
1368 CongruenceClass
*CC
= ValueToClass
.lookup(V
);
1370 // Everything in TOP is represented by poison, as it can be any value.
1371 // We do have to make sure we get the type right though, so we can't set the
1372 // RepLeader to poison.
1374 return PoisonValue::get(V
->getType());
1375 return CC
->getStoredValue() ? CC
->getStoredValue() : CC
->getLeader();
1381 const MemoryAccess
*NewGVN::lookupMemoryLeader(const MemoryAccess
*MA
) const {
1382 auto *CC
= getMemoryClass(MA
);
1383 assert(CC
->getMemoryLeader() &&
1384 "Every MemoryAccess should be mapped to a congruence class with a "
1385 "representative memory access");
1386 return CC
->getMemoryLeader();
1389 // Return true if the MemoryAccess is really equivalent to everything. This is
1390 // equivalent to the lattice value "TOP" in most lattices. This is the initial
1391 // state of all MemoryAccesses.
1392 bool NewGVN::isMemoryAccessTOP(const MemoryAccess
*MA
) const {
1393 return getMemoryClass(MA
) == TOPClass
;
1396 LoadExpression
*NewGVN::createLoadExpression(Type
*LoadType
, Value
*PointerOp
,
1398 const MemoryAccess
*MA
) const {
1400 new (ExpressionAllocator
) LoadExpression(1, LI
, lookupMemoryLeader(MA
));
1401 E
->allocateOperands(ArgRecycler
, ExpressionAllocator
);
1402 E
->setType(LoadType
);
1404 // Give store and loads same opcode so they value number together.
1406 E
->op_push_back(PointerOp
);
1408 // TODO: Value number heap versions. We may be able to discover
1409 // things alias analysis can't on it's own (IE that a store and a
1410 // load have the same value, and thus, it isn't clobbering the load).
1414 const StoreExpression
*
1415 NewGVN::createStoreExpression(StoreInst
*SI
, const MemoryAccess
*MA
) const {
1416 auto *StoredValueLeader
= lookupOperandLeader(SI
->getValueOperand());
1417 auto *E
= new (ExpressionAllocator
)
1418 StoreExpression(SI
->getNumOperands(), SI
, StoredValueLeader
, MA
);
1419 E
->allocateOperands(ArgRecycler
, ExpressionAllocator
);
1420 E
->setType(SI
->getValueOperand()->getType());
1422 // Give store and loads same opcode so they value number together.
1424 E
->op_push_back(lookupOperandLeader(SI
->getPointerOperand()));
1426 // TODO: Value number heap versions. We may be able to discover
1427 // things alias analysis can't on it's own (IE that a store and a
1428 // load have the same value, and thus, it isn't clobbering the load).
1432 const Expression
*NewGVN::performSymbolicStoreEvaluation(Instruction
*I
) const {
1433 // Unlike loads, we never try to eliminate stores, so we do not check if they
1434 // are simple and avoid value numbering them.
1435 auto *SI
= cast
<StoreInst
>(I
);
1436 auto *StoreAccess
= getMemoryAccess(SI
);
1437 // Get the expression, if any, for the RHS of the MemoryDef.
1438 const MemoryAccess
*StoreRHS
= StoreAccess
->getDefiningAccess();
1439 if (EnableStoreRefinement
)
1440 StoreRHS
= MSSAWalker
->getClobberingMemoryAccess(StoreAccess
);
1441 // If we bypassed the use-def chains, make sure we add a use.
1442 StoreRHS
= lookupMemoryLeader(StoreRHS
);
1443 if (StoreRHS
!= StoreAccess
->getDefiningAccess())
1444 addMemoryUsers(StoreRHS
, StoreAccess
);
1445 // If we are defined by ourselves, use the live on entry def.
1446 if (StoreRHS
== StoreAccess
)
1447 StoreRHS
= MSSA
->getLiveOnEntryDef();
1449 if (SI
->isSimple()) {
1450 // See if we are defined by a previous store expression, it already has a
1451 // value, and it's the same value as our current store. FIXME: Right now, we
1452 // only do this for simple stores, we should expand to cover memcpys, etc.
1453 const auto *LastStore
= createStoreExpression(SI
, StoreRHS
);
1454 const auto *LastCC
= ExpressionToClass
.lookup(LastStore
);
1455 // We really want to check whether the expression we matched was a store. No
1456 // easy way to do that. However, we can check that the class we found has a
1457 // store, which, assuming the value numbering state is not corrupt, is
1458 // sufficient, because we must also be equivalent to that store's expression
1459 // for it to be in the same class as the load.
1460 if (LastCC
&& LastCC
->getStoredValue() == LastStore
->getStoredValue())
1462 // Also check if our value operand is defined by a load of the same memory
1463 // location, and the memory state is the same as it was then (otherwise, it
1464 // could have been overwritten later. See test32 in
1465 // transforms/DeadStoreElimination/simple.ll).
1466 if (auto *LI
= dyn_cast
<LoadInst
>(LastStore
->getStoredValue()))
1467 if ((lookupOperandLeader(LI
->getPointerOperand()) ==
1468 LastStore
->getOperand(0)) &&
1469 (lookupMemoryLeader(getMemoryAccess(LI
)->getDefiningAccess()) ==
1472 deleteExpression(LastStore
);
1475 // If the store is not equivalent to anything, value number it as a store that
1476 // produces a unique memory state (instead of using it's MemoryUse, we use
1478 return createStoreExpression(SI
, StoreAccess
);
1481 // See if we can extract the value of a loaded pointer from a load, a store, or
1482 // a memory instruction.
1484 NewGVN::performSymbolicLoadCoercion(Type
*LoadType
, Value
*LoadPtr
,
1485 LoadInst
*LI
, Instruction
*DepInst
,
1486 MemoryAccess
*DefiningAccess
) const {
1487 assert((!LI
|| LI
->isSimple()) && "Not a simple load");
1488 if (auto *DepSI
= dyn_cast
<StoreInst
>(DepInst
)) {
1489 // Can't forward from non-atomic to atomic without violating memory model.
1490 // Also don't need to coerce if they are the same type, we will just
1492 if (LI
->isAtomic() > DepSI
->isAtomic() ||
1493 LoadType
== DepSI
->getValueOperand()->getType())
1495 int Offset
= analyzeLoadFromClobberingStore(LoadType
, LoadPtr
, DepSI
, DL
);
1497 if (auto *C
= dyn_cast
<Constant
>(
1498 lookupOperandLeader(DepSI
->getValueOperand()))) {
1499 if (Constant
*Res
= getConstantValueForLoad(C
, Offset
, LoadType
, DL
)) {
1500 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI
1501 << " to constant " << *Res
<< "\n");
1502 return createConstantExpression(Res
);
1506 } else if (auto *DepLI
= dyn_cast
<LoadInst
>(DepInst
)) {
1507 // Can't forward from non-atomic to atomic without violating memory model.
1508 if (LI
->isAtomic() > DepLI
->isAtomic())
1510 int Offset
= analyzeLoadFromClobberingLoad(LoadType
, LoadPtr
, DepLI
, DL
);
1512 // We can coerce a constant load into a load.
1513 if (auto *C
= dyn_cast
<Constant
>(lookupOperandLeader(DepLI
)))
1514 if (auto *PossibleConstant
=
1515 getConstantValueForLoad(C
, Offset
, LoadType
, DL
)) {
1516 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI
1517 << " to constant " << *PossibleConstant
<< "\n");
1518 return createConstantExpression(PossibleConstant
);
1521 } else if (auto *DepMI
= dyn_cast
<MemIntrinsic
>(DepInst
)) {
1522 int Offset
= analyzeLoadFromClobberingMemInst(LoadType
, LoadPtr
, DepMI
, DL
);
1524 if (auto *PossibleConstant
=
1525 getConstantMemInstValueForLoad(DepMI
, Offset
, LoadType
, DL
)) {
1526 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
1527 << " to constant " << *PossibleConstant
<< "\n");
1528 return createConstantExpression(PossibleConstant
);
1533 // All of the below are only true if the loaded pointer is produced
1534 // by the dependent instruction.
1535 if (LoadPtr
!= lookupOperandLeader(DepInst
) &&
1536 !AA
->isMustAlias(LoadPtr
, DepInst
))
1538 // If this load really doesn't depend on anything, then we must be loading an
1539 // undef value. This can happen when loading for a fresh allocation with no
1540 // intervening stores, for example. Note that this is only true in the case
1541 // that the result of the allocation is pointer equal to the load ptr.
1542 if (isa
<AllocaInst
>(DepInst
)) {
1543 return createConstantExpression(UndefValue::get(LoadType
));
1545 // If this load occurs either right after a lifetime begin,
1546 // then the loaded value is undefined.
1547 else if (auto *II
= dyn_cast
<IntrinsicInst
>(DepInst
)) {
1548 if (II
->getIntrinsicID() == Intrinsic::lifetime_start
)
1549 return createConstantExpression(UndefValue::get(LoadType
));
1550 } else if (auto *InitVal
=
1551 getInitialValueOfAllocation(DepInst
, TLI
, LoadType
))
1552 return createConstantExpression(InitVal
);
1557 const Expression
*NewGVN::performSymbolicLoadEvaluation(Instruction
*I
) const {
1558 auto *LI
= cast
<LoadInst
>(I
);
1560 // We can eliminate in favor of non-simple loads, but we won't be able to
1561 // eliminate the loads themselves.
1562 if (!LI
->isSimple())
1565 Value
*LoadAddressLeader
= lookupOperandLeader(LI
->getPointerOperand());
1566 // Load of undef is UB.
1567 if (isa
<UndefValue
>(LoadAddressLeader
))
1568 return createConstantExpression(PoisonValue::get(LI
->getType()));
1569 MemoryAccess
*OriginalAccess
= getMemoryAccess(I
);
1570 MemoryAccess
*DefiningAccess
=
1571 MSSAWalker
->getClobberingMemoryAccess(OriginalAccess
);
1573 if (!MSSA
->isLiveOnEntryDef(DefiningAccess
)) {
1574 if (auto *MD
= dyn_cast
<MemoryDef
>(DefiningAccess
)) {
1575 Instruction
*DefiningInst
= MD
->getMemoryInst();
1576 // If the defining instruction is not reachable, replace with poison.
1577 if (!ReachableBlocks
.count(DefiningInst
->getParent()))
1578 return createConstantExpression(PoisonValue::get(LI
->getType()));
1579 // This will handle stores and memory insts. We only do if it the
1580 // defining access has a different type, or it is a pointer produced by
1581 // certain memory operations that cause the memory to have a fixed value
1582 // (IE things like calloc).
1583 if (const auto *CoercionResult
=
1584 performSymbolicLoadCoercion(LI
->getType(), LoadAddressLeader
, LI
,
1585 DefiningInst
, DefiningAccess
))
1586 return CoercionResult
;
1590 const auto *LE
= createLoadExpression(LI
->getType(), LoadAddressLeader
, LI
,
1592 // If our MemoryLeader is not our defining access, add a use to the
1593 // MemoryLeader, so that we get reprocessed when it changes.
1594 if (LE
->getMemoryLeader() != DefiningAccess
)
1595 addMemoryUsers(LE
->getMemoryLeader(), OriginalAccess
);
1600 NewGVN::performSymbolicPredicateInfoEvaluation(IntrinsicInst
*I
) const {
1601 auto *PI
= PredInfo
->getPredicateInfoFor(I
);
1603 return ExprResult::none();
1605 LLVM_DEBUG(dbgs() << "Found predicate info from instruction !\n");
1607 const std::optional
<PredicateConstraint
> &Constraint
= PI
->getConstraint();
1609 return ExprResult::none();
1611 CmpInst::Predicate Predicate
= Constraint
->Predicate
;
1612 Value
*CmpOp0
= I
->getOperand(0);
1613 Value
*CmpOp1
= Constraint
->OtherOp
;
1615 Value
*FirstOp
= lookupOperandLeader(CmpOp0
);
1616 Value
*SecondOp
= lookupOperandLeader(CmpOp1
);
1617 Value
*AdditionallyUsedValue
= CmpOp0
;
1620 if (shouldSwapOperandsForIntrinsic(FirstOp
, SecondOp
, I
)) {
1621 std::swap(FirstOp
, SecondOp
);
1622 Predicate
= CmpInst::getSwappedPredicate(Predicate
);
1623 AdditionallyUsedValue
= CmpOp1
;
1626 if (Predicate
== CmpInst::ICMP_EQ
)
1627 return ExprResult::some(createVariableOrConstant(FirstOp
),
1628 AdditionallyUsedValue
, PI
);
1630 // Handle the special case of floating point.
1631 if (Predicate
== CmpInst::FCMP_OEQ
&& isa
<ConstantFP
>(FirstOp
) &&
1632 !cast
<ConstantFP
>(FirstOp
)->isZero())
1633 return ExprResult::some(createConstantExpression(cast
<Constant
>(FirstOp
)),
1634 AdditionallyUsedValue
, PI
);
1636 return ExprResult::none();
1639 // Evaluate read only and pure calls, and create an expression result.
1640 NewGVN::ExprResult
NewGVN::performSymbolicCallEvaluation(Instruction
*I
) const {
1641 auto *CI
= cast
<CallInst
>(I
);
1642 if (auto *II
= dyn_cast
<IntrinsicInst
>(I
)) {
1643 // Intrinsics with the returned attribute are copies of arguments.
1644 if (auto *ReturnedValue
= II
->getReturnedArgOperand()) {
1645 if (II
->getIntrinsicID() == Intrinsic::ssa_copy
)
1646 if (auto Res
= performSymbolicPredicateInfoEvaluation(II
))
1648 return ExprResult::some(createVariableOrConstant(ReturnedValue
));
1652 // FIXME: Currently the calls which may access the thread id may
1653 // be considered as not accessing the memory. But this is
1654 // problematic for coroutines, since coroutines may resume in a
1655 // different thread. So we disable the optimization here for the
1656 // correctness. However, it may block many other correct
1657 // optimizations. Revert this one when we detect the memory
1658 // accessing kind more precisely.
1659 if (CI
->getFunction()->isPresplitCoroutine())
1660 return ExprResult::none();
1662 // Do not combine convergent calls since they implicitly depend on the set of
1663 // threads that is currently executing, and they might be in different basic
1665 if (CI
->isConvergent())
1666 return ExprResult::none();
1668 if (AA
->doesNotAccessMemory(CI
)) {
1669 return ExprResult::some(
1670 createCallExpression(CI
, TOPClass
->getMemoryLeader()));
1671 } else if (AA
->onlyReadsMemory(CI
)) {
1672 if (auto *MA
= MSSA
->getMemoryAccess(CI
)) {
1673 auto *DefiningAccess
= MSSAWalker
->getClobberingMemoryAccess(MA
);
1674 return ExprResult::some(createCallExpression(CI
, DefiningAccess
));
1675 } else // MSSA determined that CI does not access memory.
1676 return ExprResult::some(
1677 createCallExpression(CI
, TOPClass
->getMemoryLeader()));
1679 return ExprResult::none();
1682 // Retrieve the memory class for a given MemoryAccess.
1683 CongruenceClass
*NewGVN::getMemoryClass(const MemoryAccess
*MA
) const {
1684 auto *Result
= MemoryAccessToClass
.lookup(MA
);
1685 assert(Result
&& "Should have found memory class");
1689 // Update the MemoryAccess equivalence table to say that From is equal to To,
1690 // and return true if this is different from what already existed in the table.
1691 bool NewGVN::setMemoryClass(const MemoryAccess
*From
,
1692 CongruenceClass
*NewClass
) {
1694 "Every MemoryAccess should be getting mapped to a non-null class");
1695 LLVM_DEBUG(dbgs() << "Setting " << *From
);
1696 LLVM_DEBUG(dbgs() << " equivalent to congruence class ");
1697 LLVM_DEBUG(dbgs() << NewClass
->getID()
1698 << " with current MemoryAccess leader ");
1699 LLVM_DEBUG(dbgs() << *NewClass
->getMemoryLeader() << "\n");
1701 auto LookupResult
= MemoryAccessToClass
.find(From
);
1702 bool Changed
= false;
1703 // If it's already in the table, see if the value changed.
1704 if (LookupResult
!= MemoryAccessToClass
.end()) {
1705 auto *OldClass
= LookupResult
->second
;
1706 if (OldClass
!= NewClass
) {
1707 // If this is a phi, we have to handle memory member updates.
1708 if (auto *MP
= dyn_cast
<MemoryPhi
>(From
)) {
1709 OldClass
->memory_erase(MP
);
1710 NewClass
->memory_insert(MP
);
1711 // This may have killed the class if it had no non-memory members
1712 if (OldClass
->getMemoryLeader() == From
) {
1713 if (OldClass
->definesNoMemory()) {
1714 OldClass
->setMemoryLeader(nullptr);
1716 OldClass
->setMemoryLeader(getNextMemoryLeader(OldClass
));
1717 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
1718 << OldClass
->getID() << " to "
1719 << *OldClass
->getMemoryLeader()
1720 << " due to removal of a memory member " << *From
1722 markMemoryLeaderChangeTouched(OldClass
);
1726 // It wasn't equivalent before, and now it is.
1727 LookupResult
->second
= NewClass
;
1735 // Determine if a instruction is cycle-free. That means the values in the
1736 // instruction don't depend on any expressions that can change value as a result
1737 // of the instruction. For example, a non-cycle free instruction would be v =
1739 bool NewGVN::isCycleFree(const Instruction
*I
) const {
1740 // In order to compute cycle-freeness, we do SCC finding on the instruction,
1741 // and see what kind of SCC it ends up in. If it is a singleton, it is
1742 // cycle-free. If it is not in a singleton, it is only cycle free if the
1743 // other members are all phi nodes (as they do not compute anything, they are
1745 auto ICS
= InstCycleState
.lookup(I
);
1746 if (ICS
== ICS_Unknown
) {
1748 auto &SCC
= SCCFinder
.getComponentFor(I
);
1749 // It's cycle free if it's size 1 or the SCC is *only* phi nodes.
1750 if (SCC
.size() == 1)
1751 InstCycleState
.insert({I
, ICS_CycleFree
});
1753 bool AllPhis
= llvm::all_of(SCC
, [](const Value
*V
) {
1754 return isa
<PHINode
>(V
) || isCopyOfAPHI(V
);
1756 ICS
= AllPhis
? ICS_CycleFree
: ICS_Cycle
;
1757 for (const auto *Member
: SCC
)
1758 if (auto *MemberPhi
= dyn_cast
<PHINode
>(Member
))
1759 InstCycleState
.insert({MemberPhi
, ICS
});
1762 if (ICS
== ICS_Cycle
)
1767 // Evaluate PHI nodes symbolically and create an expression result.
1769 NewGVN::performSymbolicPHIEvaluation(ArrayRef
<ValPair
> PHIOps
,
1771 BasicBlock
*PHIBlock
) const {
1772 // True if one of the incoming phi edges is a backedge.
1773 bool HasBackedge
= false;
1774 // All constant tracks the state of whether all the *original* phi operands
1775 // This is really shorthand for "this phi cannot cycle due to forward
1776 // change in value of the phi is guaranteed not to later change the value of
1777 // the phi. IE it can't be v = phi(undef, v+1)
1778 bool OriginalOpsConstant
= true;
1779 auto *E
= cast
<PHIExpression
>(createPHIExpression(
1780 PHIOps
, I
, PHIBlock
, HasBackedge
, OriginalOpsConstant
));
1781 // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1782 // See if all arguments are the same.
1783 // We track if any were undef because they need special handling.
1784 bool HasUndef
= false, HasPoison
= false;
1785 auto Filtered
= make_filter_range(E
->operands(), [&](Value
*Arg
) {
1786 if (isa
<PoisonValue
>(Arg
)) {
1790 if (isa
<UndefValue
>(Arg
)) {
1796 // If we are left with no operands, it's dead.
1797 if (Filtered
.empty()) {
1798 // If it has undef or poison at this point, it means there are no-non-undef
1799 // arguments, and thus, the value of the phi node must be undef.
1802 dbgs() << "PHI Node " << *I
1803 << " has no non-undef arguments, valuing it as undef\n");
1804 return createConstantExpression(UndefValue::get(I
->getType()));
1808 dbgs() << "PHI Node " << *I
1809 << " has no non-poison arguments, valuing it as poison\n");
1810 return createConstantExpression(PoisonValue::get(I
->getType()));
1813 LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I
<< " are live\n");
1814 deleteExpression(E
);
1815 return createDeadExpression();
1817 Value
*AllSameValue
= *(Filtered
.begin());
1819 // Can't use std::equal here, sadly, because filter.begin moves.
1820 if (llvm::all_of(Filtered
, [&](Value
*Arg
) { return Arg
== AllSameValue
; })) {
1821 // Can't fold phi(undef, X) -> X unless X can't be poison (thus X is undef
1822 // in the worst case).
1823 if (HasUndef
&& !isGuaranteedNotToBePoison(AllSameValue
, AC
, nullptr, DT
))
1826 // In LLVM's non-standard representation of phi nodes, it's possible to have
1827 // phi nodes with cycles (IE dependent on other phis that are .... dependent
1828 // on the original phi node), especially in weird CFG's where some arguments
1829 // are unreachable, or uninitialized along certain paths. This can cause
1830 // infinite loops during evaluation. We work around this by not trying to
1831 // really evaluate them independently, but instead using a variable
1832 // expression to say if one is equivalent to the other.
1833 // We also special case undef/poison, so that if we have an undef, we can't
1834 // use the common value unless it dominates the phi block.
1835 if (HasPoison
|| HasUndef
) {
1836 // If we have undef and at least one other value, this is really a
1837 // multivalued phi, and we need to know if it's cycle free in order to
1838 // evaluate whether we can ignore the undef. The other parts of this are
1839 // just shortcuts. If there is no backedge, or all operands are
1840 // constants, it also must be cycle free.
1841 if (HasBackedge
&& !OriginalOpsConstant
&&
1842 !isa
<UndefValue
>(AllSameValue
) && !isCycleFree(I
))
1845 // Only have to check for instructions
1846 if (auto *AllSameInst
= dyn_cast
<Instruction
>(AllSameValue
))
1847 if (!someEquivalentDominates(AllSameInst
, I
))
1850 // Can't simplify to something that comes later in the iteration.
1851 // Otherwise, when and if it changes congruence class, we will never catch
1852 // up. We will always be a class behind it.
1853 if (isa
<Instruction
>(AllSameValue
) &&
1854 InstrToDFSNum(AllSameValue
) > InstrToDFSNum(I
))
1856 NumGVNPhisAllSame
++;
1857 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I
<< " to " << *AllSameValue
1859 deleteExpression(E
);
1860 return createVariableOrConstant(AllSameValue
);
1866 NewGVN::performSymbolicAggrValueEvaluation(Instruction
*I
) const {
1867 if (auto *EI
= dyn_cast
<ExtractValueInst
>(I
)) {
1868 auto *WO
= dyn_cast
<WithOverflowInst
>(EI
->getAggregateOperand());
1869 if (WO
&& EI
->getNumIndices() == 1 && *EI
->idx_begin() == 0)
1870 // EI is an extract from one of our with.overflow intrinsics. Synthesize
1871 // a semantically equivalent expression instead of an extract value
1873 return createBinaryExpression(WO
->getBinaryOp(), EI
->getType(),
1874 WO
->getLHS(), WO
->getRHS(), I
);
1877 return createAggregateValueExpression(I
);
1880 NewGVN::ExprResult
NewGVN::performSymbolicCmpEvaluation(Instruction
*I
) const {
1881 assert(isa
<CmpInst
>(I
) && "Expected a cmp instruction.");
1883 auto *CI
= cast
<CmpInst
>(I
);
1884 // See if our operands are equal to those of a previous predicate, and if so,
1885 // if it implies true or false.
1886 auto Op0
= lookupOperandLeader(CI
->getOperand(0));
1887 auto Op1
= lookupOperandLeader(CI
->getOperand(1));
1888 auto OurPredicate
= CI
->getPredicate();
1889 if (shouldSwapOperands(Op0
, Op1
)) {
1890 std::swap(Op0
, Op1
);
1891 OurPredicate
= CI
->getSwappedPredicate();
1894 // Avoid processing the same info twice.
1895 const PredicateBase
*LastPredInfo
= nullptr;
1896 // See if we know something about the comparison itself, like it is the target
1898 auto *CmpPI
= PredInfo
->getPredicateInfoFor(I
);
1899 if (isa_and_nonnull
<PredicateAssume
>(CmpPI
))
1900 return ExprResult::some(
1901 createConstantExpression(ConstantInt::getTrue(CI
->getType())));
1904 // This condition does not depend on predicates, no need to add users
1905 if (CI
->isTrueWhenEqual())
1906 return ExprResult::some(
1907 createConstantExpression(ConstantInt::getTrue(CI
->getType())));
1908 else if (CI
->isFalseWhenEqual())
1909 return ExprResult::some(
1910 createConstantExpression(ConstantInt::getFalse(CI
->getType())));
1913 // NOTE: Because we are comparing both operands here and below, and using
1914 // previous comparisons, we rely on fact that predicateinfo knows to mark
1915 // comparisons that use renamed operands as users of the earlier comparisons.
1916 // It is *not* enough to just mark predicateinfo renamed operands as users of
1917 // the earlier comparisons, because the *other* operand may have changed in a
1918 // previous iteration.
1921 // %b.0 = ssa.copy(%b)
1923 // icmp slt %c, %b.0
1925 // %c and %a may start out equal, and thus, the code below will say the second
1926 // %icmp is false. c may become equal to something else, and in that case the
1927 // %second icmp *must* be reexamined, but would not if only the renamed
1928 // %operands are considered users of the icmp.
1930 // *Currently* we only check one level of comparisons back, and only mark one
1931 // level back as touched when changes happen. If you modify this code to look
1932 // back farther through comparisons, you *must* mark the appropriate
1933 // comparisons as users in PredicateInfo.cpp, or you will cause bugs. See if
1934 // we know something just from the operands themselves
1936 // See if our operands have predicate info, so that we may be able to derive
1937 // something from a previous comparison.
1938 for (const auto &Op
: CI
->operands()) {
1939 auto *PI
= PredInfo
->getPredicateInfoFor(Op
);
1940 if (const auto *PBranch
= dyn_cast_or_null
<PredicateBranch
>(PI
)) {
1941 if (PI
== LastPredInfo
)
1944 // In phi of ops cases, we may have predicate info that we are evaluating
1945 // in a different context.
1946 if (!DT
->dominates(PBranch
->To
, I
->getParent()))
1948 // TODO: Along the false edge, we may know more things too, like
1950 // same operands is false.
1951 // TODO: We only handle actual comparison conditions below, not
1953 auto *BranchCond
= dyn_cast
<CmpInst
>(PBranch
->Condition
);
1956 auto *BranchOp0
= lookupOperandLeader(BranchCond
->getOperand(0));
1957 auto *BranchOp1
= lookupOperandLeader(BranchCond
->getOperand(1));
1958 auto BranchPredicate
= BranchCond
->getPredicate();
1959 if (shouldSwapOperands(BranchOp0
, BranchOp1
)) {
1960 std::swap(BranchOp0
, BranchOp1
);
1961 BranchPredicate
= BranchCond
->getSwappedPredicate();
1963 if (BranchOp0
== Op0
&& BranchOp1
== Op1
) {
1964 if (PBranch
->TrueEdge
) {
1965 // If we know the previous predicate is true and we are in the true
1966 // edge then we may be implied true or false.
1967 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate
,
1969 return ExprResult::some(
1970 createConstantExpression(ConstantInt::getTrue(CI
->getType())),
1974 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate
,
1976 return ExprResult::some(
1977 createConstantExpression(ConstantInt::getFalse(CI
->getType())),
1981 // Just handle the ne and eq cases, where if we have the same
1982 // operands, we may know something.
1983 if (BranchPredicate
== OurPredicate
) {
1984 // Same predicate, same ops,we know it was false, so this is false.
1985 return ExprResult::some(
1986 createConstantExpression(ConstantInt::getFalse(CI
->getType())),
1988 } else if (BranchPredicate
==
1989 CmpInst::getInversePredicate(OurPredicate
)) {
1990 // Inverse predicate, we know the other was false, so this is true.
1991 return ExprResult::some(
1992 createConstantExpression(ConstantInt::getTrue(CI
->getType())),
1999 // Create expression will take care of simplifyCmpInst
2000 return createExpression(I
);
2003 // Substitute and symbolize the instruction before value numbering.
2005 NewGVN::performSymbolicEvaluation(Instruction
*I
,
2006 SmallPtrSetImpl
<Value
*> &Visited
) const {
2008 const Expression
*E
= nullptr;
2009 // TODO: memory intrinsics.
2010 // TODO: Some day, we should do the forward propagation and reassociation
2011 // parts of the algorithm.
2012 switch (I
->getOpcode()) {
2013 case Instruction::ExtractValue
:
2014 case Instruction::InsertValue
:
2015 E
= performSymbolicAggrValueEvaluation(I
);
2017 case Instruction::PHI
: {
2018 SmallVector
<ValPair
, 3> Ops
;
2019 auto *PN
= cast
<PHINode
>(I
);
2020 for (unsigned i
= 0; i
< PN
->getNumOperands(); ++i
)
2021 Ops
.push_back({PN
->getIncomingValue(i
), PN
->getIncomingBlock(i
)});
2022 // Sort to ensure the invariant createPHIExpression requires is met.
2024 E
= performSymbolicPHIEvaluation(Ops
, I
, getBlockForValue(I
));
2026 case Instruction::Call
:
2027 return performSymbolicCallEvaluation(I
);
2029 case Instruction::Store
:
2030 E
= performSymbolicStoreEvaluation(I
);
2032 case Instruction::Load
:
2033 E
= performSymbolicLoadEvaluation(I
);
2035 case Instruction::BitCast
:
2036 case Instruction::AddrSpaceCast
:
2037 case Instruction::Freeze
:
2038 return createExpression(I
);
2040 case Instruction::ICmp
:
2041 case Instruction::FCmp
:
2042 return performSymbolicCmpEvaluation(I
);
2044 case Instruction::FNeg
:
2045 case Instruction::Add
:
2046 case Instruction::FAdd
:
2047 case Instruction::Sub
:
2048 case Instruction::FSub
:
2049 case Instruction::Mul
:
2050 case Instruction::FMul
:
2051 case Instruction::UDiv
:
2052 case Instruction::SDiv
:
2053 case Instruction::FDiv
:
2054 case Instruction::URem
:
2055 case Instruction::SRem
:
2056 case Instruction::FRem
:
2057 case Instruction::Shl
:
2058 case Instruction::LShr
:
2059 case Instruction::AShr
:
2060 case Instruction::And
:
2061 case Instruction::Or
:
2062 case Instruction::Xor
:
2063 case Instruction::Trunc
:
2064 case Instruction::ZExt
:
2065 case Instruction::SExt
:
2066 case Instruction::FPToUI
:
2067 case Instruction::FPToSI
:
2068 case Instruction::UIToFP
:
2069 case Instruction::SIToFP
:
2070 case Instruction::FPTrunc
:
2071 case Instruction::FPExt
:
2072 case Instruction::PtrToInt
:
2073 case Instruction::IntToPtr
:
2074 case Instruction::Select
:
2075 case Instruction::ExtractElement
:
2076 case Instruction::InsertElement
:
2077 case Instruction::GetElementPtr
:
2078 return createExpression(I
);
2080 case Instruction::ShuffleVector
:
2081 // FIXME: Add support for shufflevector to createExpression.
2082 return ExprResult::none();
2084 return ExprResult::none();
2086 return ExprResult::some(E
);
2089 // Look up a container of values/instructions in a map, and touch all the
2090 // instructions in the container. Then erase value from the map.
2091 template <typename Map
, typename KeyType
>
2092 void NewGVN::touchAndErase(Map
&M
, const KeyType
&Key
) {
2093 const auto Result
= M
.find_as(Key
);
2094 if (Result
!= M
.end()) {
2095 for (const typename
Map::mapped_type::value_type Mapped
: Result
->second
)
2096 TouchedInstructions
.set(InstrToDFSNum(Mapped
));
// Record that User depends on the value To through a non-def-use edge
// (e.g. a simplification), so User is re-touched when To's class changes.
// Only instructions can change class, so non-instruction targets are skipped.
void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
  assert(User && To != User);
  if (isa<Instruction>(To))
    AdditionalUsers[To].insert(User);
}
2107 void NewGVN::addAdditionalUsers(ExprResult
&Res
, Instruction
*User
) const {
2108 if (Res
.ExtraDep
&& Res
.ExtraDep
!= User
)
2109 addAdditionalUsers(Res
.ExtraDep
, User
);
2110 Res
.ExtraDep
= nullptr;
2113 if (const auto *PBranch
= dyn_cast
<PredicateBranch
>(Res
.PredDep
))
2114 PredicateToUsers
[PBranch
->Condition
].insert(User
);
2115 else if (const auto *PAssume
= dyn_cast
<PredicateAssume
>(Res
.PredDep
))
2116 PredicateToUsers
[PAssume
->Condition
].insert(User
);
2118 Res
.PredDep
= nullptr;
// Mark every user of V (both IR def-use users and the additional users
// recorded via addAdditionalUsers) as touched so they get reprocessed.
void NewGVN::markUsersTouched(Value *V) {
  // Now mark the users as touched.
  for (auto *User : V->users()) {
    assert(isa<Instruction>(User) && "Use of value not within an instruction?");
    TouchedInstructions.set(InstrToDFSNum(User));
  }
  // Touch and drop the recorded indirect users as well.
  touchAndErase(AdditionalUsers, V);
}
// Record that memory access U depends on To, so U is re-touched when the
// class of To changes.
void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
  LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
  MemoryToUsers[To].insert(U);
}
// Mark the instruction/phi behind a MemoryAccess as touched.
void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
  TouchedInstructions.set(MemoryToDFSNum(MA));
}
2139 void NewGVN::markMemoryUsersTouched(const MemoryAccess
*MA
) {
2140 if (isa
<MemoryUse
>(MA
))
2142 for (const auto *U
: MA
->users())
2143 TouchedInstructions
.set(MemoryToDFSNum(U
));
2144 touchAndErase(MemoryToUsers
, MA
);
// Touch all the predicates that depend on this instruction.
void NewGVN::markPredicateUsersTouched(Instruction *I) {
  touchAndErase(PredicateToUsers, I);
}
// Mark users affected by a memory leader change.
void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
  // Every memory member of the class may value-number differently under the
  // new leader, so touch each of them.
  for (const auto *M : CC->memory())
    markMemoryDefTouched(M);
}
// Touch the instructions that need to be updated after a congruence class has a
// leader change, and mark changed values.
void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) {
  for (auto *M : *CC) {
    // Only instructions have DFS numbers to touch; non-instruction members
    // (e.g. constants/arguments) are still recorded as leader-changed.
    if (auto *I = dyn_cast<Instruction>(M))
      TouchedInstructions.set(InstrToDFSNum(I));
    LeaderChanges.insert(M);
  }
}
// Give a range of things that have instruction DFS numbers, this will return
// the member of the range with the smallest dfs number.
template <class T, class Range>
T *NewGVN::getMinDFSOfRange(const Range &R) const {
  // Track the current best (member, dfs-number) pair; ~0U sentinel loses to
  // any real DFS number.
  std::pair<T *, unsigned> MinDFS = {nullptr, ~0U};
  for (const auto X : R) {
    auto DFSNum = InstrToDFSNum(X);
    if (DFSNum < MinDFS.second)
      MinDFS = {X, DFSNum};
  }
  return MinDFS.first;
}
// This function returns the MemoryAccess that should be the next leader of
// congruence class CC, under the assumption that the current leader is going
// away.
const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const {
  // TODO: If this ends up to slow, we can maintain a next memory leader like we
  // do for regular leaders.
  // Make sure there will be a leader to find.
  assert(!CC->definesNoMemory() && "Can't get next leader if there is none");
  if (CC->getStoreCount() > 0) {
    // Prefer the cached next leader if it is a store.
    if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first))
      return getMemoryAccess(NL);
    // Find the store with the minimum DFS number.
    auto *V = getMinDFSOfRange<Value>(make_filter_range(
        *CC, [&](const Value *V) { return isa<StoreInst>(V); }));
    return getMemoryAccess(cast<StoreInst>(V));
  }
  assert(CC->getStoreCount() == 0);

  // Given our assertion, hitting this part must mean
  // !OldClass->memory_empty()
  if (CC->memory_size() == 1)
    return *CC->memory_begin();
  return getMinDFSOfRange<const MemoryPhi>(CC->memory());
}
2206 // This function returns the next value leader of a congruence class, under the
2207 // assumption that the current leader is going away. This should end up being
2208 // the next most dominating member.
2209 Value
*NewGVN::getNextValueLeader(CongruenceClass
*CC
) const {
2210 // We don't need to sort members if there is only 1, and we don't care about
2211 // sorting the TOP class because everything either gets out of it or is
2214 if (CC
->size() == 1 || CC
== TOPClass
) {
2215 return *(CC
->begin());
2216 } else if (CC
->getNextLeader().first
) {
2217 ++NumGVNAvoidedSortedLeaderChanges
;
2218 return CC
->getNextLeader().first
;
2220 ++NumGVNSortedLeaderChanges
;
2221 // NOTE: If this ends up to slow, we can maintain a dual structure for
2222 // member testing/insertion, or keep things mostly sorted, and sort only
2223 // here, or use SparseBitVector or ....
2224 return getMinDFSOfRange
<Value
>(*CC
);
2228 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to
2229 // the memory members, etc for the move.
2231 // The invariants of this function are:
2233 // - I must be moving to NewClass from OldClass
2234 // - The StoreCount of OldClass and NewClass is expected to have been updated
2235 // for I already if it is a store.
2236 // - The OldClass memory leader has not been updated yet if I was the leader.
2237 void NewGVN::moveMemoryToNewCongruenceClass(Instruction
*I
,
2238 MemoryAccess
*InstMA
,
2239 CongruenceClass
*OldClass
,
2240 CongruenceClass
*NewClass
) {
2241 // If the leader is I, and we had a representative MemoryAccess, it should
2242 // be the MemoryAccess of OldClass.
2243 assert((!InstMA
|| !OldClass
->getMemoryLeader() ||
2244 OldClass
->getLeader() != I
||
2245 MemoryAccessToClass
.lookup(OldClass
->getMemoryLeader()) ==
2246 MemoryAccessToClass
.lookup(InstMA
)) &&
2247 "Representative MemoryAccess mismatch");
2248 // First, see what happens to the new class
2249 if (!NewClass
->getMemoryLeader()) {
2250 // Should be a new class, or a store becoming a leader of a new class.
2251 assert(NewClass
->size() == 1 ||
2252 (isa
<StoreInst
>(I
) && NewClass
->getStoreCount() == 1));
2253 NewClass
->setMemoryLeader(InstMA
);
2254 // Mark it touched if we didn't just create a singleton
2255 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2256 << NewClass
->getID()
2257 << " due to new memory instruction becoming leader\n");
2258 markMemoryLeaderChangeTouched(NewClass
);
2260 setMemoryClass(InstMA
, NewClass
);
2261 // Now, fixup the old class if necessary
2262 if (OldClass
->getMemoryLeader() == InstMA
) {
2263 if (!OldClass
->definesNoMemory()) {
2264 OldClass
->setMemoryLeader(getNextMemoryLeader(OldClass
));
2265 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2266 << OldClass
->getID() << " to "
2267 << *OldClass
->getMemoryLeader()
2268 << " due to removal of old leader " << *InstMA
<< "\n");
2269 markMemoryLeaderChangeTouched(OldClass
);
2271 OldClass
->setMemoryLeader(nullptr);
2275 // Move a value, currently in OldClass, to be part of NewClass
2276 // Update OldClass and NewClass for the move (including changing leaders, etc).
2277 void NewGVN::moveValueToNewCongruenceClass(Instruction
*I
, const Expression
*E
,
2278 CongruenceClass
*OldClass
,
2279 CongruenceClass
*NewClass
) {
2280 if (I
== OldClass
->getNextLeader().first
)
2281 OldClass
->resetNextLeader();
2284 NewClass
->insert(I
);
2286 // Ensure that the leader has the lowest RPO. If the leader changed notify all
2287 // members of the class.
2288 if (NewClass
->getLeader() != I
&&
2289 NewClass
->addPossibleLeader({I
, InstrToDFSNum(I
)})) {
2290 markValueLeaderChangeTouched(NewClass
);
2293 // Handle our special casing of stores.
2294 if (auto *SI
= dyn_cast
<StoreInst
>(I
)) {
2295 OldClass
->decStoreCount();
2296 // Okay, so when do we want to make a store a leader of a class?
2297 // If we have a store defined by an earlier load, we want the earlier load
2298 // to lead the class.
2299 // If we have a store defined by something else, we want the store to lead
2300 // the class so everything else gets the "something else" as a value.
2301 // If we have a store as the single member of the class, we want the store
2303 if (NewClass
->getStoreCount() == 0 && !NewClass
->getStoredValue()) {
2304 // If it's a store expression we are using, it means we are not equivalent
2305 // to something earlier.
2306 if (auto *SE
= dyn_cast
<StoreExpression
>(E
)) {
2307 NewClass
->setStoredValue(SE
->getStoredValue());
2308 markValueLeaderChangeTouched(NewClass
);
2309 // Shift the new class leader to be the store
2310 LLVM_DEBUG(dbgs() << "Changing leader of congruence class "
2311 << NewClass
->getID() << " from "
2312 << *NewClass
->getLeader() << " to " << *SI
2313 << " because store joined class\n");
2314 // If we changed the leader, we have to mark it changed because we don't
2315 // know what it will do to symbolic evaluation.
2316 NewClass
->setLeader({SI
, InstrToDFSNum(SI
)});
2318 // We rely on the code below handling the MemoryAccess change.
2320 NewClass
->incStoreCount();
2322 // True if there is no memory instructions left in a class that had memory
2323 // instructions before.
2325 // If it's not a memory use, set the MemoryAccess equivalence
2326 auto *InstMA
= dyn_cast_or_null
<MemoryDef
>(getMemoryAccess(I
));
2328 moveMemoryToNewCongruenceClass(I
, InstMA
, OldClass
, NewClass
);
2329 ValueToClass
[I
] = NewClass
;
2330 // See if we destroyed the class or need to swap leaders.
2331 if (OldClass
->empty() && OldClass
!= TOPClass
) {
2332 if (OldClass
->getDefiningExpr()) {
2333 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass
->getDefiningExpr()
2334 << " from table\n");
2335 // We erase it as an exact expression to make sure we don't just erase an
2337 auto Iter
= ExpressionToClass
.find_as(
2338 ExactEqualsExpression(*OldClass
->getDefiningExpr()));
2339 if (Iter
!= ExpressionToClass
.end())
2340 ExpressionToClass
.erase(Iter
);
2341 #ifdef EXPENSIVE_CHECKS
2343 (*OldClass
->getDefiningExpr() != *E
|| ExpressionToClass
.lookup(E
)) &&
2344 "We erased the expression we just inserted, which should not happen");
2347 } else if (OldClass
->getLeader() == I
) {
2348 // When the leader changes, the value numbering of
2349 // everything may change due to symbolization changes, so we need to
2351 LLVM_DEBUG(dbgs() << "Value class leader change for class "
2352 << OldClass
->getID() << "\n");
2353 ++NumGVNLeaderChanges
;
2354 // Destroy the stored value if there are no more stores to represent it.
2355 // Note that this is basically clean up for the expression removal that
2356 // happens below. If we remove stores from a class, we may leave it as a
2357 // class of equivalent memory phis.
2358 if (OldClass
->getStoreCount() == 0) {
2359 if (OldClass
->getStoredValue())
2360 OldClass
->setStoredValue(nullptr);
2362 OldClass
->setLeader({getNextValueLeader(OldClass
),
2363 InstrToDFSNum(getNextValueLeader(OldClass
))});
2364 OldClass
->resetNextLeader();
2365 markValueLeaderChangeTouched(OldClass
);
// For a given expression, mark the phi of ops instructions that could have
// changed as a result.
void NewGVN::markPhiOfOpsChanged(const Expression *E) {
  touchAndErase(ExpressionToPhiOfOps, E);
}
2375 // Perform congruence finding on a given value numbering expression.
2376 void NewGVN::performCongruenceFinding(Instruction
*I
, const Expression
*E
) {
2377 // This is guaranteed to return something, since it will at least find
2380 CongruenceClass
*IClass
= ValueToClass
.lookup(I
);
2381 assert(IClass
&& "Should have found a IClass");
2382 // Dead classes should have been eliminated from the mapping.
2383 assert(!IClass
->isDead() && "Found a dead class");
2385 CongruenceClass
*EClass
= nullptr;
2386 if (const auto *VE
= dyn_cast
<VariableExpression
>(E
)) {
2387 EClass
= ValueToClass
.lookup(VE
->getVariableValue());
2388 } else if (isa
<DeadExpression
>(E
)) {
2392 auto lookupResult
= ExpressionToClass
.insert({E
, nullptr});
2394 // If it's not in the value table, create a new congruence class.
2395 if (lookupResult
.second
) {
2396 CongruenceClass
*NewClass
= createCongruenceClass(nullptr, E
);
2397 auto place
= lookupResult
.first
;
2398 place
->second
= NewClass
;
2400 // Constants and variables should always be made the leader.
2401 if (const auto *CE
= dyn_cast
<ConstantExpression
>(E
)) {
2402 NewClass
->setLeader({CE
->getConstantValue(), 0});
2403 } else if (const auto *SE
= dyn_cast
<StoreExpression
>(E
)) {
2404 StoreInst
*SI
= SE
->getStoreInst();
2405 NewClass
->setLeader({SI
, InstrToDFSNum(SI
)});
2406 NewClass
->setStoredValue(SE
->getStoredValue());
2407 // The RepMemoryAccess field will be filled in properly by the
2408 // moveValueToNewCongruenceClass call.
2410 NewClass
->setLeader({I
, InstrToDFSNum(I
)});
2412 assert(!isa
<VariableExpression
>(E
) &&
2413 "VariableExpression should have been handled already");
2416 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I
2417 << " using expression " << *E
<< " at "
2418 << NewClass
->getID() << " and leader "
2419 << *(NewClass
->getLeader()));
2420 if (NewClass
->getStoredValue())
2421 LLVM_DEBUG(dbgs() << " and stored value "
2422 << *(NewClass
->getStoredValue()));
2423 LLVM_DEBUG(dbgs() << "\n");
2425 EClass
= lookupResult
.first
->second
;
2426 if (isa
<ConstantExpression
>(E
))
2427 assert((isa
<Constant
>(EClass
->getLeader()) ||
2428 (EClass
->getStoredValue() &&
2429 isa
<Constant
>(EClass
->getStoredValue()))) &&
2430 "Any class with a constant expression should have a "
2433 assert(EClass
&& "Somehow don't have an eclass");
2435 assert(!EClass
->isDead() && "We accidentally looked up a dead class");
2438 bool ClassChanged
= IClass
!= EClass
;
2439 bool LeaderChanged
= LeaderChanges
.erase(I
);
2440 if (ClassChanged
|| LeaderChanged
) {
2441 LLVM_DEBUG(dbgs() << "New class " << EClass
->getID() << " for expression "
2444 moveValueToNewCongruenceClass(I
, E
, IClass
, EClass
);
2445 markPhiOfOpsChanged(E
);
2448 markUsersTouched(I
);
2449 if (MemoryAccess
*MA
= getMemoryAccess(I
))
2450 markMemoryUsersTouched(MA
);
2451 if (auto *CI
= dyn_cast
<CmpInst
>(I
))
2452 markPredicateUsersTouched(CI
);
2454 // If we changed the class of the store, we want to ensure nothing finds the
2455 // old store expression. In particular, loads do not compare against stored
2456 // value, so they will find old store expressions (and associated class
2457 // mappings) if we leave them in the table.
2458 if (ClassChanged
&& isa
<StoreInst
>(I
)) {
2459 auto *OldE
= ValueToExpression
.lookup(I
);
2460 // It could just be that the old class died. We don't want to erase it if we
2461 // just moved classes.
2462 if (OldE
&& isa
<StoreExpression
>(OldE
) && *E
!= *OldE
) {
2463 // Erase this as an exact expression to ensure we don't erase expressions
2464 // equivalent to it.
2465 auto Iter
= ExpressionToClass
.find_as(ExactEqualsExpression(*OldE
));
2466 if (Iter
!= ExpressionToClass
.end())
2467 ExpressionToClass
.erase(Iter
);
2470 ValueToExpression
[I
] = E
;
2473 // Process the fact that Edge (from, to) is reachable, including marking
2474 // any newly reachable blocks and instructions for processing.
2475 void NewGVN::updateReachableEdge(BasicBlock
*From
, BasicBlock
*To
) {
2476 // Check if the Edge was reachable before.
2477 if (ReachableEdges
.insert({From
, To
}).second
) {
2478 // If this block wasn't reachable before, all instructions are touched.
2479 if (ReachableBlocks
.insert(To
).second
) {
2480 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To
)
2481 << " marked reachable\n");
2482 const auto &InstRange
= BlockInstRange
.lookup(To
);
2483 TouchedInstructions
.set(InstRange
.first
, InstRange
.second
);
2485 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To
)
2486 << " was reachable, but new edge {"
2487 << getBlockName(From
) << "," << getBlockName(To
)
2488 << "} to it found\n");
2490 // We've made an edge reachable to an existing block, which may
2491 // impact predicates. Otherwise, only mark the phi nodes as touched, as
2492 // they are the only thing that depend on new edges. Anything using their
2493 // values will get propagated to if necessary.
2494 if (MemoryAccess
*MemPhi
= getMemoryAccess(To
))
2495 TouchedInstructions
.set(InstrToDFSNum(MemPhi
));
2497 // FIXME: We should just add a union op on a Bitvector and
2498 // SparseBitVector. We can do it word by word faster than we are doing it
2500 for (auto InstNum
: RevisitOnReachabilityChange
[To
])
2501 TouchedInstructions
.set(InstNum
);
2506 // Given a predicate condition (from a switch, cmp, or whatever) and a block,
2507 // see if we know some constant value for it already.
2508 Value
*NewGVN::findConditionEquivalence(Value
*Cond
) const {
2509 auto Result
= lookupOperandLeader(Cond
);
2510 return isa
<Constant
>(Result
) ? Result
: nullptr;
2513 // Process the outgoing edges of a block for reachability.
2514 void NewGVN::processOutgoingEdges(Instruction
*TI
, BasicBlock
*B
) {
2515 // Evaluate reachability of terminator instruction.
2517 BasicBlock
*TrueSucc
, *FalseSucc
;
2518 if (match(TI
, m_Br(m_Value(Cond
), TrueSucc
, FalseSucc
))) {
2519 Value
*CondEvaluated
= findConditionEquivalence(Cond
);
2520 if (!CondEvaluated
) {
2521 if (auto *I
= dyn_cast
<Instruction
>(Cond
)) {
2522 SmallPtrSet
<Value
*, 4> Visited
;
2523 auto Res
= performSymbolicEvaluation(I
, Visited
);
2524 if (const auto *CE
= dyn_cast_or_null
<ConstantExpression
>(Res
.Expr
)) {
2525 CondEvaluated
= CE
->getConstantValue();
2526 addAdditionalUsers(Res
, I
);
2528 // Did not use simplification result, no need to add the extra
2530 Res
.ExtraDep
= nullptr;
2532 } else if (isa
<ConstantInt
>(Cond
)) {
2533 CondEvaluated
= Cond
;
2537 if (CondEvaluated
&& (CI
= dyn_cast
<ConstantInt
>(CondEvaluated
))) {
2539 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2540 << " evaluated to true\n");
2541 updateReachableEdge(B
, TrueSucc
);
2542 } else if (CI
->isZero()) {
2543 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2544 << " evaluated to false\n");
2545 updateReachableEdge(B
, FalseSucc
);
2548 updateReachableEdge(B
, TrueSucc
);
2549 updateReachableEdge(B
, FalseSucc
);
2551 } else if (auto *SI
= dyn_cast
<SwitchInst
>(TI
)) {
2552 // For switches, propagate the case values into the case
2555 Value
*SwitchCond
= SI
->getCondition();
2556 Value
*CondEvaluated
= findConditionEquivalence(SwitchCond
);
2557 // See if we were able to turn this switch statement into a constant.
2558 if (CondEvaluated
&& isa
<ConstantInt
>(CondEvaluated
)) {
2559 auto *CondVal
= cast
<ConstantInt
>(CondEvaluated
);
2560 // We should be able to get case value for this.
2561 auto Case
= *SI
->findCaseValue(CondVal
);
2562 if (Case
.getCaseSuccessor() == SI
->getDefaultDest()) {
2563 // We proved the value is outside of the range of the case.
2564 // We can't do anything other than mark the default dest as reachable,
2566 updateReachableEdge(B
, SI
->getDefaultDest());
2569 // Now get where it goes and mark it reachable.
2570 BasicBlock
*TargetBlock
= Case
.getCaseSuccessor();
2571 updateReachableEdge(B
, TargetBlock
);
2573 for (BasicBlock
*TargetBlock
: successors(SI
->getParent()))
2574 updateReachableEdge(B
, TargetBlock
);
2577 // Otherwise this is either unconditional, or a type we have no
2578 // idea about. Just mark successors as reachable.
2579 for (BasicBlock
*TargetBlock
: successors(TI
->getParent()))
2580 updateReachableEdge(B
, TargetBlock
);
2582 // This also may be a memory defining terminator, in which case, set it
2583 // equivalent only to itself.
2585 auto *MA
= getMemoryAccess(TI
);
2586 if (MA
&& !isa
<MemoryUse
>(MA
)) {
2587 auto *CC
= ensureLeaderOfMemoryClass(MA
);
2588 if (setMemoryClass(MA
, CC
))
2589 markMemoryUsersTouched(MA
);
2594 // Remove the PHI of Ops PHI for I
2595 void NewGVN::removePhiOfOps(Instruction
*I
, PHINode
*PHITemp
) {
2596 InstrDFS
.erase(PHITemp
);
2597 // It's still a temp instruction. We keep it in the array so it gets erased.
2598 // However, it's no longer used by I, or in the block
2599 TempToBlock
.erase(PHITemp
);
2600 RealToTemp
.erase(I
);
2601 // We don't remove the users from the phi node uses. This wastes a little
2602 // time, but such is life. We could use two sets to track which were there
2603 // are the start of NewGVN, and which were added, but right nowt he cost of
2604 // tracking is more than the cost of checking for more phi of ops.
2607 // Add PHI Op in BB as a PHI of operations version of ExistingValue.
2608 void NewGVN::addPhiOfOps(PHINode
*Op
, BasicBlock
*BB
,
2609 Instruction
*ExistingValue
) {
2610 InstrDFS
[Op
] = InstrToDFSNum(ExistingValue
);
2611 AllTempInstructions
.insert(Op
);
2612 TempToBlock
[Op
] = BB
;
2613 RealToTemp
[ExistingValue
] = Op
;
2614 // Add all users to phi node use, as they are now uses of the phi of ops phis
2615 // and may themselves be phi of ops.
2616 for (auto *U
: ExistingValue
->users())
2617 if (auto *UI
= dyn_cast
<Instruction
>(U
))
2618 PHINodeUses
.insert(UI
);
2621 static bool okayForPHIOfOps(const Instruction
*I
) {
2622 if (!EnablePhiOfOps
)
2624 return isa
<BinaryOperator
>(I
) || isa
<SelectInst
>(I
) || isa
<CmpInst
>(I
) ||
2628 // Return true if this operand will be safe to use for phi of ops.
2630 // The reason some operands are unsafe is that we are not trying to recursively
2631 // translate everything back through phi nodes. We actually expect some lookups
2632 // of expressions to fail. In particular, a lookup where the expression cannot
2633 // exist in the predecessor. This is true even if the expression, as shown, can
2634 // be determined to be constant.
2635 bool NewGVN::OpIsSafeForPHIOfOps(Value
*V
, const BasicBlock
*PHIBlock
,
2636 SmallPtrSetImpl
<const Value
*> &Visited
) {
2637 SmallVector
<Value
*, 4> Worklist
;
2638 Worklist
.push_back(V
);
2639 while (!Worklist
.empty()) {
2640 auto *I
= Worklist
.pop_back_val();
2641 if (!isa
<Instruction
>(I
))
2644 auto OISIt
= OpSafeForPHIOfOps
.find({I
, CacheIdx
});
2645 if (OISIt
!= OpSafeForPHIOfOps
.end())
2646 return OISIt
->second
;
2648 // Keep walking until we either dominate the phi block, or hit a phi, or run
2649 // out of things to check.
2650 if (DT
->properlyDominates(getBlockForValue(I
), PHIBlock
)) {
2651 OpSafeForPHIOfOps
.insert({{I
, CacheIdx
}, true});
2654 // PHI in the same block.
2655 if (isa
<PHINode
>(I
) && getBlockForValue(I
) == PHIBlock
) {
2656 OpSafeForPHIOfOps
.insert({{I
, CacheIdx
}, false});
2660 auto *OrigI
= cast
<Instruction
>(I
);
2661 // When we hit an instruction that reads memory (load, call, etc), we must
2662 // consider any store that may happen in the loop. For now, we assume the
2663 // worst: there is a store in the loop that alias with this read.
2664 // The case where the load is outside the loop is already covered by the
2665 // dominator check above.
2666 // TODO: relax this condition
2667 if (OrigI
->mayReadFromMemory())
2670 // Check the operands of the current instruction.
2671 for (auto *Op
: OrigI
->operand_values()) {
2672 if (!isa
<Instruction
>(Op
))
2674 // Stop now if we find an unsafe operand.
2675 auto OISIt
= OpSafeForPHIOfOps
.find({OrigI
, CacheIdx
});
2676 if (OISIt
!= OpSafeForPHIOfOps
.end()) {
2677 if (!OISIt
->second
) {
2678 OpSafeForPHIOfOps
.insert({{I
, CacheIdx
}, false});
2683 if (!Visited
.insert(Op
).second
)
2685 Worklist
.push_back(cast
<Instruction
>(Op
));
2688 OpSafeForPHIOfOps
.insert({{V
, CacheIdx
}, true});
2692 // Try to find a leader for instruction TransInst, which is a phi translated
2693 // version of something in our original program. Visited is used to ensure we
2694 // don't infinite loop during translations of cycles. OrigInst is the
2695 // instruction in the original program, and PredBB is the predecessor we
2696 // translated it through.
2697 Value
*NewGVN::findLeaderForInst(Instruction
*TransInst
,
2698 SmallPtrSetImpl
<Value
*> &Visited
,
2699 MemoryAccess
*MemAccess
, Instruction
*OrigInst
,
2700 BasicBlock
*PredBB
) {
2701 unsigned IDFSNum
= InstrToDFSNum(OrigInst
);
2702 // Make sure it's marked as a temporary instruction.
2703 AllTempInstructions
.insert(TransInst
);
2704 // and make sure anything that tries to add it's DFS number is
2705 // redirected to the instruction we are making a phi of ops
2707 TempToBlock
.insert({TransInst
, PredBB
});
2708 InstrDFS
.insert({TransInst
, IDFSNum
});
2710 auto Res
= performSymbolicEvaluation(TransInst
, Visited
);
2711 const Expression
*E
= Res
.Expr
;
2712 addAdditionalUsers(Res
, OrigInst
);
2713 InstrDFS
.erase(TransInst
);
2714 AllTempInstructions
.erase(TransInst
);
2715 TempToBlock
.erase(TransInst
);
2717 TempToMemory
.erase(TransInst
);
2720 auto *FoundVal
= findPHIOfOpsLeader(E
, OrigInst
, PredBB
);
2722 ExpressionToPhiOfOps
[E
].insert(OrigInst
);
2723 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
2724 << " in block " << getBlockName(PredBB
) << "\n");
2727 if (auto *SI
= dyn_cast
<StoreInst
>(FoundVal
))
2728 FoundVal
= SI
->getValueOperand();
2732 // When we see an instruction that is an op of phis, generate the equivalent phi
2735 NewGVN::makePossiblePHIOfOps(Instruction
*I
,
2736 SmallPtrSetImpl
<Value
*> &Visited
) {
2737 if (!okayForPHIOfOps(I
))
2740 if (!Visited
.insert(I
).second
)
2742 // For now, we require the instruction be cycle free because we don't
2743 // *always* create a phi of ops for instructions that could be done as phi
2744 // of ops, we only do it if we think it is useful. If we did do it all the
2745 // time, we could remove the cycle free check.
2746 if (!isCycleFree(I
))
2749 SmallPtrSet
<const Value
*, 8> ProcessedPHIs
;
2750 // TODO: We don't do phi translation on memory accesses because it's
2751 // complicated. For a load, we'd need to be able to simulate a new memoryuse,
2752 // which we don't have a good way of doing ATM.
2753 auto *MemAccess
= getMemoryAccess(I
);
2754 // If the memory operation is defined by a memory operation this block that
2755 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi
2756 // can't help, as it would still be killed by that memory operation.
2757 if (MemAccess
&& !isa
<MemoryPhi
>(MemAccess
->getDefiningAccess()) &&
2758 MemAccess
->getDefiningAccess()->getBlock() == I
->getParent())
2761 // Convert op of phis to phi of ops
2762 SmallPtrSet
<const Value
*, 10> VisitedOps
;
2763 SmallVector
<Value
*, 4> Ops(I
->operand_values());
2764 BasicBlock
*SamePHIBlock
= nullptr;
2765 PHINode
*OpPHI
= nullptr;
2766 if (!DebugCounter::shouldExecute(PHIOfOpsCounter
))
2768 for (auto *Op
: Ops
) {
2769 if (!isa
<PHINode
>(Op
)) {
2770 auto *ValuePHI
= RealToTemp
.lookup(Op
);
2773 LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
2776 OpPHI
= cast
<PHINode
>(Op
);
2777 if (!SamePHIBlock
) {
2778 SamePHIBlock
= getBlockForValue(OpPHI
);
2779 } else if (SamePHIBlock
!= getBlockForValue(OpPHI
)) {
2782 << "PHIs for operands are not all in the same block, aborting\n");
2785 // No point in doing this for one-operand phis.
2786 // Since all PHIs for operands must be in the same block, then they must
2787 // have the same number of operands so we can just abort.
2788 if (OpPHI
->getNumOperands() == 1)
2795 SmallVector
<ValPair
, 4> PHIOps
;
2796 SmallPtrSet
<Value
*, 4> Deps
;
2797 auto *PHIBlock
= getBlockForValue(OpPHI
);
2798 RevisitOnReachabilityChange
[PHIBlock
].reset(InstrToDFSNum(I
));
2799 for (unsigned PredNum
= 0; PredNum
< OpPHI
->getNumOperands(); ++PredNum
) {
2800 auto *PredBB
= OpPHI
->getIncomingBlock(PredNum
);
2801 Value
*FoundVal
= nullptr;
2802 SmallPtrSet
<Value
*, 4> CurrentDeps
;
2803 // We could just skip unreachable edges entirely but it's tricky to do
2804 // with rewriting existing phi nodes.
2805 if (ReachableEdges
.count({PredBB
, PHIBlock
})) {
2806 // Clone the instruction, create an expression from it that is
2807 // translated back into the predecessor, and see if we have a leader.
2808 Instruction
*ValueOp
= I
->clone();
2809 // Emit the temporal instruction in the predecessor basic block where the
2810 // corresponding value is defined.
2811 ValueOp
->insertBefore(PredBB
->getTerminator());
2813 TempToMemory
.insert({ValueOp
, MemAccess
});
2814 bool SafeForPHIOfOps
= true;
2816 for (auto &Op
: ValueOp
->operands()) {
2817 auto *OrigOp
= &*Op
;
2818 // When these operand changes, it could change whether there is a
2819 // leader for us or not, so we have to add additional users.
2820 if (isa
<PHINode
>(Op
)) {
2821 Op
= Op
->DoPHITranslation(PHIBlock
, PredBB
);
2822 if (Op
!= OrigOp
&& Op
!= I
)
2823 CurrentDeps
.insert(Op
);
2824 } else if (auto *ValuePHI
= RealToTemp
.lookup(Op
)) {
2825 if (getBlockForValue(ValuePHI
) == PHIBlock
)
2826 Op
= ValuePHI
->getIncomingValueForBlock(PredBB
);
2828 // If we phi-translated the op, it must be safe.
2831 (Op
!= OrigOp
|| OpIsSafeForPHIOfOps(Op
, PHIBlock
, VisitedOps
));
2833 // FIXME: For those things that are not safe we could generate
2834 // expressions all the way down, and see if this comes out to a
2835 // constant. For anything where that is true, and unsafe, we should
2836 // have made a phi-of-ops (or value numbered it equivalent to something)
2837 // for the pieces already.
2838 FoundVal
= !SafeForPHIOfOps
? nullptr
2839 : findLeaderForInst(ValueOp
, Visited
,
2840 MemAccess
, I
, PredBB
);
2841 ValueOp
->eraseFromParent();
2843 // We failed to find a leader for the current ValueOp, but this might
2844 // change in case of the translated operands change.
2845 if (SafeForPHIOfOps
)
2846 for (auto *Dep
: CurrentDeps
)
2847 addAdditionalUsers(Dep
, I
);
2851 Deps
.insert(CurrentDeps
.begin(), CurrentDeps
.end());
2853 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2854 << getBlockName(PredBB
)
2855 << " because the block is unreachable\n");
2856 FoundVal
= PoisonValue::get(I
->getType());
2857 RevisitOnReachabilityChange
[PHIBlock
].set(InstrToDFSNum(I
));
2860 PHIOps
.push_back({FoundVal
, PredBB
});
2861 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal
<< " in "
2862 << getBlockName(PredBB
) << "\n");
2864 for (auto *Dep
: Deps
)
2865 addAdditionalUsers(Dep
, I
);
2867 auto *E
= performSymbolicPHIEvaluation(PHIOps
, I
, PHIBlock
);
2868 if (isa
<ConstantExpression
>(E
) || isa
<VariableExpression
>(E
)) {
2871 << "Not creating real PHI of ops because it simplified to existing "
2872 "value or constant\n");
2873 // We have leaders for all operands, but do not create a real PHI node with
2874 // those leaders as operands, so the link between the operands and the
2875 // PHI-of-ops is not materialized in the IR. If any of those leaders
2876 // changes, the PHI-of-op may change also, so we need to add the operands as
2877 // additional users.
2878 for (auto &O
: PHIOps
)
2879 addAdditionalUsers(O
.first
, I
);
2883 auto *ValuePHI
= RealToTemp
.lookup(I
);
2884 bool NewPHI
= false;
2887 PHINode::Create(I
->getType(), OpPHI
->getNumOperands(), "phiofops");
2888 addPhiOfOps(ValuePHI
, PHIBlock
, I
);
2890 NumGVNPHIOfOpsCreated
++;
2893 for (auto PHIOp
: PHIOps
)
2894 ValuePHI
->addIncoming(PHIOp
.first
, PHIOp
.second
);
2896 TempToBlock
[ValuePHI
] = PHIBlock
;
2898 for (auto PHIOp
: PHIOps
) {
2899 ValuePHI
->setIncomingValue(i
, PHIOp
.first
);
2900 ValuePHI
->setIncomingBlock(i
, PHIOp
.second
);
2904 RevisitOnReachabilityChange
[PHIBlock
].set(InstrToDFSNum(I
));
2905 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI
<< " for " << *I
2911 // The algorithm initially places the values of the routine in the TOP
2912 // congruence class. The leader of TOP is the undetermined value `poison`.
2913 // When the algorithm has finished, values still in TOP are unreachable.
2914 void NewGVN::initializeCongruenceClasses(Function
&F
) {
2915 NextCongruenceNum
= 0;
2917 // Note that even though we use the live on entry def as a representative
2918 // MemoryAccess, it is *not* the same as the actual live on entry def. We
2919 // have no real equivalent to poison for MemoryAccesses, and so we really
2920 // should be checking whether the MemoryAccess is top if we want to know if it
2921 // is equivalent to everything. Otherwise, what this really signifies is that
2922 // the access "it reaches all the way back to the beginning of the function"
2924 // Initialize all other instructions to be in TOP class.
2925 TOPClass
= createCongruenceClass(nullptr, nullptr);
2926 TOPClass
->setMemoryLeader(MSSA
->getLiveOnEntryDef());
2927 // The live on entry def gets put into it's own class
2928 MemoryAccessToClass
[MSSA
->getLiveOnEntryDef()] =
2929 createMemoryClass(MSSA
->getLiveOnEntryDef());
2931 for (auto *DTN
: nodes(DT
)) {
2932 BasicBlock
*BB
= DTN
->getBlock();
2933 // All MemoryAccesses are equivalent to live on entry to start. They must
2934 // be initialized to something so that initial changes are noticed. For
2935 // the maximal answer, we initialize them all to be the same as
2937 auto *MemoryBlockDefs
= MSSA
->getBlockDefs(BB
);
2938 if (MemoryBlockDefs
)
2939 for (const auto &Def
: *MemoryBlockDefs
) {
2940 MemoryAccessToClass
[&Def
] = TOPClass
;
2941 auto *MD
= dyn_cast
<MemoryDef
>(&Def
);
2942 // Insert the memory phis into the member list.
2944 const MemoryPhi
*MP
= cast
<MemoryPhi
>(&Def
);
2945 TOPClass
->memory_insert(MP
);
2946 MemoryPhiState
.insert({MP
, MPS_TOP
});
2949 if (MD
&& isa
<StoreInst
>(MD
->getMemoryInst()))
2950 TOPClass
->incStoreCount();
2953 // FIXME: This is trying to discover which instructions are uses of phi
2954 // nodes. We should move this into one of the myriad of places that walk
2955 // all the operands already.
2956 for (auto &I
: *BB
) {
2957 if (isa
<PHINode
>(&I
))
2958 for (auto *U
: I
.users())
2959 if (auto *UInst
= dyn_cast
<Instruction
>(U
))
2960 if (InstrToDFSNum(UInst
) != 0 && okayForPHIOfOps(UInst
))
2961 PHINodeUses
.insert(UInst
);
2962 // Don't insert void terminators into the class. We don't value number
2963 // them, and they just end up sitting in TOP.
2964 if (I
.isTerminator() && I
.getType()->isVoidTy())
2966 TOPClass
->insert(&I
);
2967 ValueToClass
[&I
] = TOPClass
;
2971 // Initialize arguments to be in their own unique congruence classes
2972 for (auto &FA
: F
.args())
2973 createSingletonCongruenceClass(&FA
);
2976 void NewGVN::cleanupTables() {
2977 for (CongruenceClass
*&CC
: CongruenceClasses
) {
2978 LLVM_DEBUG(dbgs() << "Congruence class " << CC
->getID() << " has "
2979 << CC
->size() << " members\n");
2980 // Make sure we delete the congruence class (probably worth switching to
2981 // a unique_ptr at some point.
2986 // Destroy the value expressions
2987 SmallVector
<Instruction
*, 8> TempInst(AllTempInstructions
.begin(),
2988 AllTempInstructions
.end());
2989 AllTempInstructions
.clear();
2991 // We have to drop all references for everything first, so there are no uses
2992 // left as we delete them.
2993 for (auto *I
: TempInst
) {
2994 I
->dropAllReferences();
2997 while (!TempInst
.empty()) {
2998 auto *I
= TempInst
.pop_back_val();
3002 ValueToClass
.clear();
3003 ArgRecycler
.clear(ExpressionAllocator
);
3004 ExpressionAllocator
.Reset();
3005 CongruenceClasses
.clear();
3006 ExpressionToClass
.clear();
3007 ValueToExpression
.clear();
3009 AdditionalUsers
.clear();
3010 ExpressionToPhiOfOps
.clear();
3011 TempToBlock
.clear();
3012 TempToMemory
.clear();
3013 PHINodeUses
.clear();
3014 OpSafeForPHIOfOps
.clear();
3015 ReachableBlocks
.clear();
3016 ReachableEdges
.clear();
3018 ProcessedCount
.clear();
3021 InstructionsToErase
.clear();
3023 BlockInstRange
.clear();
3024 TouchedInstructions
.clear();
3025 MemoryAccessToClass
.clear();
3026 PredicateToUsers
.clear();
3027 MemoryToUsers
.clear();
3028 RevisitOnReachabilityChange
.clear();
3029 IntrinsicInstPred
.clear();
3032 // Assign local DFS number mapping to instructions, and leave space for Value
3034 std::pair
<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock
*B
,
3036 unsigned End
= Start
;
3037 if (MemoryAccess
*MemPhi
= getMemoryAccess(B
)) {
3038 InstrDFS
[MemPhi
] = End
++;
3039 DFSToInstr
.emplace_back(MemPhi
);
3042 // Then the real block goes next.
3043 for (auto &I
: *B
) {
3044 // There's no need to call isInstructionTriviallyDead more than once on
3045 // an instruction. Therefore, once we know that an instruction is dead
3046 // we change its DFS number so that it doesn't get value numbered.
3047 if (isInstructionTriviallyDead(&I
, TLI
)) {
3049 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I
<< "\n");
3050 markInstructionForDeletion(&I
);
3053 if (isa
<PHINode
>(&I
))
3054 RevisitOnReachabilityChange
[B
].set(End
);
3055 InstrDFS
[&I
] = End
++;
3056 DFSToInstr
.emplace_back(&I
);
3059 // All of the range functions taken half-open ranges (open on the end side).
3060 // So we do not subtract one from count, because at this point it is one
3061 // greater than the last instruction.
3062 return std::make_pair(Start
, End
);
3065 void NewGVN::updateProcessedCount(const Value
*V
) {
3067 if (ProcessedCount
.count(V
) == 0) {
3068 ProcessedCount
.insert({V
, 1});
3070 ++ProcessedCount
[V
];
3071 assert(ProcessedCount
[V
] < 100 &&
3072 "Seem to have processed the same Value a lot");
3077 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes
3078 void NewGVN::valueNumberMemoryPhi(MemoryPhi
*MP
) {
3079 // If all the arguments are the same, the MemoryPhi has the same value as the
3080 // argument. Filter out unreachable blocks and self phis from our operands.
3081 // TODO: We could do cycle-checking on the memory phis to allow valueizing for
3082 // self-phi checking.
3083 const BasicBlock
*PHIBlock
= MP
->getBlock();
3084 auto Filtered
= make_filter_range(MP
->operands(), [&](const Use
&U
) {
3085 return cast
<MemoryAccess
>(U
) != MP
&&
3086 !isMemoryAccessTOP(cast
<MemoryAccess
>(U
)) &&
3087 ReachableEdges
.count({MP
->getIncomingBlock(U
), PHIBlock
});
3089 // If all that is left is nothing, our memoryphi is poison. We keep it as
3090 // InitialClass. Note: The only case this should happen is if we have at
3091 // least one self-argument.
3092 if (Filtered
.begin() == Filtered
.end()) {
3093 if (setMemoryClass(MP
, TOPClass
))
3094 markMemoryUsersTouched(MP
);
3098 // Transform the remaining operands into operand leaders.
3099 // FIXME: mapped_iterator should have a range version.
3100 auto LookupFunc
= [&](const Use
&U
) {
3101 return lookupMemoryLeader(cast
<MemoryAccess
>(U
));
3103 auto MappedBegin
= map_iterator(Filtered
.begin(), LookupFunc
);
3104 auto MappedEnd
= map_iterator(Filtered
.end(), LookupFunc
);
3106 // and now check if all the elements are equal.
3107 // Sadly, we can't use std::equals since these are random access iterators.
3108 const auto *AllSameValue
= *MappedBegin
;
3110 bool AllEqual
= std::all_of(
3111 MappedBegin
, MappedEnd
,
3112 [&AllSameValue
](const MemoryAccess
*V
) { return V
== AllSameValue
; });
3115 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
3118 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
3119 // If it's equal to something, it's in that class. Otherwise, it has to be in
3120 // a class where it is the leader (other things may be equivalent to it, but
3121 // it needs to start off in its own class, which means it must have been the
3122 // leader, and it can't have stopped being the leader because it was never
3124 CongruenceClass
*CC
=
3125 AllEqual
? getMemoryClass(AllSameValue
) : ensureLeaderOfMemoryClass(MP
);
3126 auto OldState
= MemoryPhiState
.lookup(MP
);
3127 assert(OldState
!= MPS_Invalid
&& "Invalid memory phi state");
3128 auto NewState
= AllEqual
? MPS_Equivalent
: MPS_Unique
;
3129 MemoryPhiState
[MP
] = NewState
;
3130 if (setMemoryClass(MP
, CC
) || OldState
!= NewState
)
3131 markMemoryUsersTouched(MP
);
3134 // Value number a single instruction, symbolically evaluating, performing
3135 // congruence finding, and updating mappings.
3136 void NewGVN::valueNumberInstruction(Instruction
*I
) {
3137 LLVM_DEBUG(dbgs() << "Processing instruction " << *I
<< "\n");
3138 if (!I
->isTerminator()) {
3139 const Expression
*Symbolized
= nullptr;
3140 SmallPtrSet
<Value
*, 2> Visited
;
3141 if (DebugCounter::shouldExecute(VNCounter
)) {
3142 auto Res
= performSymbolicEvaluation(I
, Visited
);
3143 Symbolized
= Res
.Expr
;
3144 addAdditionalUsers(Res
, I
);
3146 // Make a phi of ops if necessary
3147 if (Symbolized
&& !isa
<ConstantExpression
>(Symbolized
) &&
3148 !isa
<VariableExpression
>(Symbolized
) && PHINodeUses
.count(I
)) {
3149 auto *PHIE
= makePossiblePHIOfOps(I
, Visited
);
3150 // If we created a phi of ops, use it.
3151 // If we couldn't create one, make sure we don't leave one lying around
3154 } else if (auto *Op
= RealToTemp
.lookup(I
)) {
3155 removePhiOfOps(I
, Op
);
3159 // Mark the instruction as unused so we don't value number it again.
3162 // If we couldn't come up with a symbolic expression, use the unknown
3164 if (Symbolized
== nullptr)
3165 Symbolized
= createUnknownExpression(I
);
3166 performCongruenceFinding(I
, Symbolized
);
3168 // Handle terminators that return values. All of them produce values we
3169 // don't currently understand. We don't place non-value producing
3170 // terminators in a class.
3171 if (!I
->getType()->isVoidTy()) {
3172 auto *Symbolized
= createUnknownExpression(I
);
3173 performCongruenceFinding(I
, Symbolized
);
3175 processOutgoingEdges(I
, I
->getParent());
3179 // Check if there is a path, using single or equal argument phi nodes, from
3181 bool NewGVN::singleReachablePHIPath(
3182 SmallPtrSet
<const MemoryAccess
*, 8> &Visited
, const MemoryAccess
*First
,
3183 const MemoryAccess
*Second
) const {
3184 if (First
== Second
)
3186 if (MSSA
->isLiveOnEntryDef(First
))
3189 // This is not perfect, but as we're just verifying here, we can live with
3190 // the loss of precision. The real solution would be that of doing strongly
3191 // connected component finding in this routine, and it's probably not worth
3192 // the complexity for the time being. So, we just keep a set of visited
3193 // MemoryAccess and return true when we hit a cycle.
3194 if (!Visited
.insert(First
).second
)
3197 const auto *EndDef
= First
;
3198 for (const auto *ChainDef
: optimized_def_chain(First
)) {
3199 if (ChainDef
== Second
)
3201 if (MSSA
->isLiveOnEntryDef(ChainDef
))
3205 auto *MP
= cast
<MemoryPhi
>(EndDef
);
3206 auto ReachableOperandPred
= [&](const Use
&U
) {
3207 return ReachableEdges
.count({MP
->getIncomingBlock(U
), MP
->getBlock()});
3209 auto FilteredPhiArgs
=
3210 make_filter_range(MP
->operands(), ReachableOperandPred
);
3211 SmallVector
<const Value
*, 32> OperandList
;
3212 llvm::copy(FilteredPhiArgs
, std::back_inserter(OperandList
));
3213 bool Okay
= all_equal(OperandList
);
3215 return singleReachablePHIPath(Visited
, cast
<MemoryAccess
>(OperandList
[0]),
3220 // Verify the that the memory equivalence table makes sense relative to the
3221 // congruence classes. Note that this checking is not perfect, and is currently
3222 // subject to very rare false negatives. It is only useful for
3223 // testing/debugging.
3224 void NewGVN::verifyMemoryCongruency() const {
3226 // Verify that the memory table equivalence and memory member set match
3227 for (const auto *CC
: CongruenceClasses
) {
3228 if (CC
== TOPClass
|| CC
->isDead())
3230 if (CC
->getStoreCount() != 0) {
3231 assert((CC
->getStoredValue() || !isa
<StoreInst
>(CC
->getLeader())) &&
3232 "Any class with a store as a leader should have a "
3233 "representative stored value");
3234 assert(CC
->getMemoryLeader() &&
3235 "Any congruence class with a store should have a "
3236 "representative access");
3239 if (CC
->getMemoryLeader())
3240 assert(MemoryAccessToClass
.lookup(CC
->getMemoryLeader()) == CC
&&
3241 "Representative MemoryAccess does not appear to be reverse "
3243 for (const auto *M
: CC
->memory())
3244 assert(MemoryAccessToClass
.lookup(M
) == CC
&&
3245 "Memory member does not appear to be reverse mapped properly");
3248 // Anything equivalent in the MemoryAccess table should be in the same
3249 // congruence class.
3251 // Filter out the unreachable and trivially dead entries, because they may
3252 // never have been updated if the instructions were not processed.
3253 auto ReachableAccessPred
=
3254 [&](const std::pair
<const MemoryAccess
*, CongruenceClass
*> Pair
) {
3255 bool Result
= ReachableBlocks
.count(Pair
.first
->getBlock());
3256 if (!Result
|| MSSA
->isLiveOnEntryDef(Pair
.first
) ||
3257 MemoryToDFSNum(Pair
.first
) == 0)
3259 if (auto *MemDef
= dyn_cast
<MemoryDef
>(Pair
.first
))
3260 return !isInstructionTriviallyDead(MemDef
->getMemoryInst());
3262 // We could have phi nodes which operands are all trivially dead,
3263 // so we don't process them.
3264 if (auto *MemPHI
= dyn_cast
<MemoryPhi
>(Pair
.first
)) {
3265 for (const auto &U
: MemPHI
->incoming_values()) {
3266 if (auto *I
= dyn_cast
<Instruction
>(&*U
)) {
3267 if (!isInstructionTriviallyDead(I
))
3277 auto Filtered
= make_filter_range(MemoryAccessToClass
, ReachableAccessPred
);
3278 for (auto KV
: Filtered
) {
3279 if (auto *FirstMUD
= dyn_cast
<MemoryUseOrDef
>(KV
.first
)) {
3280 auto *SecondMUD
= dyn_cast
<MemoryUseOrDef
>(KV
.second
->getMemoryLeader());
3281 if (FirstMUD
&& SecondMUD
) {
3282 SmallPtrSet
<const MemoryAccess
*, 8> VisitedMAS
;
3283 assert((singleReachablePHIPath(VisitedMAS
, FirstMUD
, SecondMUD
) ||
3284 ValueToClass
.lookup(FirstMUD
->getMemoryInst()) ==
3285 ValueToClass
.lookup(SecondMUD
->getMemoryInst())) &&
3286 "The instructions for these memory operations should have "
3287 "been in the same congruence class or reachable through"
3288 "a single argument phi");
3290 } else if (auto *FirstMP
= dyn_cast
<MemoryPhi
>(KV
.first
)) {
3291 // We can only sanely verify that MemoryDefs in the operand list all have
3293 auto ReachableOperandPred
= [&](const Use
&U
) {
3294 return ReachableEdges
.count(
3295 {FirstMP
->getIncomingBlock(U
), FirstMP
->getBlock()}) &&
3298 // All arguments should in the same class, ignoring unreachable arguments
3299 auto FilteredPhiArgs
=
3300 make_filter_range(FirstMP
->operands(), ReachableOperandPred
);
3301 SmallVector
<const CongruenceClass
*, 16> PhiOpClasses
;
3302 std::transform(FilteredPhiArgs
.begin(), FilteredPhiArgs
.end(),
3303 std::back_inserter(PhiOpClasses
), [&](const Use
&U
) {
3304 const MemoryDef
*MD
= cast
<MemoryDef
>(U
);
3305 return ValueToClass
.lookup(MD
->getMemoryInst());
3307 assert(all_equal(PhiOpClasses
) &&
3308 "All MemoryPhi arguments should be in the same class");
3314 // Verify that the sparse propagation we did actually found the maximal fixpoint
3315 // We do this by storing the value to class mapping, touching all instructions,
3316 // and redoing the iteration to see if anything changed.
3317 void NewGVN::verifyIterationSettled(Function
&F
) {
3319 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3320 if (DebugCounter::isCounterSet(VNCounter
))
3321 DebugCounter::setCounterState(VNCounter
, StartingVNCounter
);
3323 // Note that we have to store the actual classes, as we may change existing
3324 // classes during iteration. This is because our memory iteration propagation
3325 // is not perfect, and so may waste a little work. But it should generate
3326 // exactly the same congruence classes we have now, with different IDs.
3327 std::map
<const Value
*, CongruenceClass
> BeforeIteration
;
3329 for (auto &KV
: ValueToClass
) {
3330 if (auto *I
= dyn_cast
<Instruction
>(KV
.first
))
3331 // Skip unused/dead instructions.
3332 if (InstrToDFSNum(I
) == 0)
3334 BeforeIteration
.insert({KV
.first
, *KV
.second
});
3337 TouchedInstructions
.set();
3338 TouchedInstructions
.reset(0);
3339 OpSafeForPHIOfOps
.clear();
3341 iterateTouchedInstructions();
3342 DenseSet
<std::pair
<const CongruenceClass
*, const CongruenceClass
*>>
3344 for (const auto &KV
: ValueToClass
) {
3345 if (auto *I
= dyn_cast
<Instruction
>(KV
.first
))
3346 // Skip unused/dead instructions.
3347 if (InstrToDFSNum(I
) == 0)
3349 // We could sink these uses, but i think this adds a bit of clarity here as
3350 // to what we are comparing.
3351 auto *BeforeCC
= &BeforeIteration
.find(KV
.first
)->second
;
3352 auto *AfterCC
= KV
.second
;
3353 // Note that the classes can't change at this point, so we memoize the set
3355 if (!EqualClasses
.count({BeforeCC
, AfterCC
})) {
3356 assert(BeforeCC
->isEquivalentTo(AfterCC
) &&
3357 "Value number changed after main loop completed!");
3358 EqualClasses
.insert({BeforeCC
, AfterCC
});
3364 // Verify that for each store expression in the expression to class mapping,
3365 // only the latest appears, and multiple ones do not appear.
3366 // Because loads do not use the stored value when doing equality with stores,
3367 // if we don't erase the old store expressions from the table, a load can find
3368 // a no-longer valid StoreExpression.
3369 void NewGVN::verifyStoreExpressions() const {
3371 // This is the only use of this, and it's not worth defining a complicated
3372 // densemapinfo hash/equality function for it.
3374 std::pair
<const Value
*,
3375 std::tuple
<const Value
*, const CongruenceClass
*, Value
*>>>
3377 for (const auto &KV
: ExpressionToClass
) {
3378 if (auto *SE
= dyn_cast
<StoreExpression
>(KV
.first
)) {
3379 // Make sure a version that will conflict with loads is not already there
3380 auto Res
= StoreExpressionSet
.insert(
3381 {SE
->getOperand(0), std::make_tuple(SE
->getMemoryLeader(), KV
.second
,
3382 SE
->getStoredValue())});
3383 bool Okay
= Res
.second
;
3384 // It's okay to have the same expression already in there if it is
3385 // identical in nature.
3386 // This can happen when the leader of the stored value changes over time.
3388 Okay
= (std::get
<1>(Res
.first
->second
) == KV
.second
) &&
3389 (lookupOperandLeader(std::get
<2>(Res
.first
->second
)) ==
3390 lookupOperandLeader(SE
->getStoredValue()));
3391 assert(Okay
&& "Stored expression conflict exists in expression table");
3392 auto *ValueExpr
= ValueToExpression
.lookup(SE
->getStoreInst());
3393 assert(ValueExpr
&& ValueExpr
->equals(*SE
) &&
3394 "StoreExpression in ExpressionToClass is not latest "
3395 "StoreExpression for value");
3401 // This is the main value numbering loop, it iterates over the initial touched
3402 // instruction set, propagating value numbers, marking things touched, etc,
3403 // until the set of touched instructions is completely empty.
3404 void NewGVN::iterateTouchedInstructions() {
3405 uint64_t Iterations
= 0;
3406 // Figure out where touchedinstructions starts
3407 int FirstInstr
= TouchedInstructions
.find_first();
3408 // Nothing set, nothing to iterate, just return.
3409 if (FirstInstr
== -1)
3411 const BasicBlock
*LastBlock
= getBlockForValue(InstrFromDFSNum(FirstInstr
));
3412 while (TouchedInstructions
.any()) {
3414 // Walk through all the instructions in all the blocks in RPO.
3415 // TODO: As we hit a new block, we should push and pop equalities into a
3416 // table lookupOperandLeader can use, to catch things PredicateInfo
3417 // might miss, like edge-only equivalences.
3418 for (unsigned InstrNum
: TouchedInstructions
.set_bits()) {
3420 // This instruction was found to be dead. We don't bother looking
3422 if (InstrNum
== 0) {
3423 TouchedInstructions
.reset(InstrNum
);
3427 Value
*V
= InstrFromDFSNum(InstrNum
);
3428 const BasicBlock
*CurrBlock
= getBlockForValue(V
);
3430 // If we hit a new block, do reachability processing.
3431 if (CurrBlock
!= LastBlock
) {
3432 LastBlock
= CurrBlock
;
3433 bool BlockReachable
= ReachableBlocks
.count(CurrBlock
);
3434 const auto &CurrInstRange
= BlockInstRange
.lookup(CurrBlock
);
3436 // If it's not reachable, erase any touched instructions and move on.
3437 if (!BlockReachable
) {
3438 TouchedInstructions
.reset(CurrInstRange
.first
, CurrInstRange
.second
);
3439 LLVM_DEBUG(dbgs() << "Skipping instructions in block "
3440 << getBlockName(CurrBlock
)
3441 << " because it is unreachable\n");
3444 // Use the appropriate cache for "OpIsSafeForPHIOfOps".
3445 CacheIdx
= RPOOrdering
.lookup(DT
->getNode(CurrBlock
)) - 1;
3446 updateProcessedCount(CurrBlock
);
3448 // Reset after processing (because we may mark ourselves as touched when
3449 // we propagate equalities).
3450 TouchedInstructions
.reset(InstrNum
);
3452 if (auto *MP
= dyn_cast
<MemoryPhi
>(V
)) {
3453 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP
<< "\n");
3454 valueNumberMemoryPhi(MP
);
3455 } else if (auto *I
= dyn_cast
<Instruction
>(V
)) {
3456 valueNumberInstruction(I
);
3458 llvm_unreachable("Should have been a MemoryPhi or Instruction");
3460 updateProcessedCount(V
);
3463 NumGVNMaxIterations
= std::max(NumGVNMaxIterations
.getValue(), Iterations
);
3466 // This is the main transformation entry point.
3467 bool NewGVN::runGVN() {
3468 if (DebugCounter::isCounterSet(VNCounter
))
3469 StartingVNCounter
= DebugCounter::getCounterState(VNCounter
);
3470 bool Changed
= false;
3471 NumFuncArgs
= F
.arg_size();
3472 MSSAWalker
= MSSA
->getWalker();
3473 SingletonDeadExpression
= new (ExpressionAllocator
) DeadExpression();
3475 // Count number of instructions for sizing of hash tables, and come
3476 // up with a global dfs numbering for instructions.
3477 unsigned ICount
= 1;
3478 // Add an empty instruction to account for the fact that we start at 1
3479 DFSToInstr
.emplace_back(nullptr);
3480 // Note: We want ideal RPO traversal of the blocks, which is not quite the
3481 // same as dominator tree order, particularly with regard whether backedges
3482 // get visited first or second, given a block with multiple successors.
3483 // If we visit in the wrong order, we will end up performing N times as many
3485 // The dominator tree does guarantee that, for a given dom tree node, it's
3486 // parent must occur before it in the RPO ordering. Thus, we only need to sort
3488 ReversePostOrderTraversal
<Function
*> RPOT(&F
);
3489 unsigned Counter
= 0;
3490 for (auto &B
: RPOT
) {
3491 auto *Node
= DT
->getNode(B
);
3492 assert(Node
&& "RPO and Dominator tree should have same reachability");
3493 RPOOrdering
[Node
] = ++Counter
;
3495 // Sort dominator tree children arrays into RPO.
3496 for (auto &B
: RPOT
) {
3497 auto *Node
= DT
->getNode(B
);
3498 if (Node
->getNumChildren() > 1)
3499 llvm::sort(*Node
, [&](const DomTreeNode
*A
, const DomTreeNode
*B
) {
3500 return RPOOrdering
[A
] < RPOOrdering
[B
];
3504 // Now a standard depth first ordering of the domtree is equivalent to RPO.
3505 for (auto *DTN
: depth_first(DT
->getRootNode())) {
3506 BasicBlock
*B
= DTN
->getBlock();
3507 const auto &BlockRange
= assignDFSNumbers(B
, ICount
);
3508 BlockInstRange
.insert({B
, BlockRange
});
3509 ICount
+= BlockRange
.second
- BlockRange
.first
;
3511 initializeCongruenceClasses(F
);
3513 TouchedInstructions
.resize(ICount
);
3514 // Ensure we don't end up resizing the expressionToClass map, as
3515 // that can be quite expensive. At most, we have one expression per
3517 ExpressionToClass
.reserve(ICount
);
3519 // Initialize the touched instructions to include the entry block.
3520 const auto &InstRange
= BlockInstRange
.lookup(&F
.getEntryBlock());
3521 TouchedInstructions
.set(InstRange
.first
, InstRange
.second
);
3522 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F
.getEntryBlock())
3523 << " marked reachable\n");
3524 ReachableBlocks
.insert(&F
.getEntryBlock());
3525 // Use index corresponding to entry block.
3528 iterateTouchedInstructions();
3529 verifyMemoryCongruency();
3530 verifyIterationSettled(F
);
3531 verifyStoreExpressions();
3533 Changed
|= eliminateInstructions(F
);
3535 // Delete all instructions marked for deletion.
3536 for (Instruction
*ToErase
: InstructionsToErase
) {
3537 if (!ToErase
->use_empty())
3538 ToErase
->replaceAllUsesWith(PoisonValue::get(ToErase
->getType()));
3540 assert(ToErase
->getParent() &&
3541 "BB containing ToErase deleted unexpectedly!");
3542 ToErase
->eraseFromParent();
3544 Changed
|= !InstructionsToErase
.empty();
3546 // Delete all unreachable blocks.
3547 auto UnreachableBlockPred
= [&](const BasicBlock
&BB
) {
3548 return !ReachableBlocks
.count(&BB
);
3551 for (auto &BB
: make_filter_range(F
, UnreachableBlockPred
)) {
3552 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB
)
3553 << " is unreachable\n");
3554 deleteInstructionsInBlock(&BB
);
3562 struct NewGVN::ValueDFS
{
3567 // Only one of Def and U will be set.
3568 // The bool in the Def tells us whether the Def is the stored value of a
3570 PointerIntPair
<Value
*, 1, bool> Def
;
3573 bool operator<(const ValueDFS
&Other
) const {
3574 // It's not enough that any given field be less than - we have sets
3575 // of fields that need to be evaluated together to give a proper ordering.
3576 // For example, if you have;
3581 // We want the second to be less than the first, but if we just go field
3582 // by field, we will get to Val 0 < Val 50 and say the first is less than
3583 // the second. We only want it to be less than if the DFS orders are equal.
3585 // Each LLVM instruction only produces one value, and thus the lowest-level
3586 // differentiator that really matters for the stack (and what we use as a
3587 // replacement) is the local dfs number.
3588 // Everything else in the structure is instruction level, and only affects
3589 // the order in which we will replace operands of a given instruction.
3591 // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3592 // the order of replacement of uses does not matter.
3596 // When you hit b, you will have two valuedfs with the same dfsin, out, and
3598 // The .val will be the same as well.
3599 // The .u's will be different.
3600 // You will replace both, and it does not matter what order you replace them
3601 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3603 // Similarly for the case of same dfsin, dfsout, localnum, but different
3608 // in c, we will a valuedfs for a, and one for b,with everything the same
3610 // It does not matter what order we replace these operands in.
3611 // You will always end up with the same IR, and this is guaranteed.
3612 return std::tie(DFSIn
, DFSOut
, LocalNum
, Def
, U
) <
3613 std::tie(Other
.DFSIn
, Other
.DFSOut
, Other
.LocalNum
, Other
.Def
,
3618 // This function converts the set of members for a congruence class from values,
3619 // to sets of defs and uses with associated DFS info. The total number of
3620 // reachable uses for each value is stored in UseCount, and instructions that
3622 // dead (have no non-dead uses) are stored in ProbablyDead.
3623 void NewGVN::convertClassToDFSOrdered(
3624 const CongruenceClass
&Dense
, SmallVectorImpl
<ValueDFS
> &DFSOrderedSet
,
3625 DenseMap
<const Value
*, unsigned int> &UseCounts
,
3626 SmallPtrSetImpl
<Instruction
*> &ProbablyDead
) const {
3627 for (auto *D
: Dense
) {
3628 // First add the value.
3629 BasicBlock
*BB
= getBlockForValue(D
);
3630 // Constants are handled prior to ever calling this function, so
3631 // we should only be left with instructions as members.
3632 assert(BB
&& "Should have figured out a basic block for value");
3634 DomTreeNode
*DomNode
= DT
->getNode(BB
);
3635 VDDef
.DFSIn
= DomNode
->getDFSNumIn();
3636 VDDef
.DFSOut
= DomNode
->getDFSNumOut();
3637 // If it's a store, use the leader of the value operand, if it's always
3638 // available, or the value operand. TODO: We could do dominance checks to
3639 // find a dominating leader, but not worth it ATM.
3640 if (auto *SI
= dyn_cast
<StoreInst
>(D
)) {
3641 auto Leader
= lookupOperandLeader(SI
->getValueOperand());
3642 if (alwaysAvailable(Leader
)) {
3643 VDDef
.Def
.setPointer(Leader
);
3645 VDDef
.Def
.setPointer(SI
->getValueOperand());
3646 VDDef
.Def
.setInt(true);
3649 VDDef
.Def
.setPointer(D
);
3651 assert(isa
<Instruction
>(D
) &&
3652 "The dense set member should always be an instruction");
3653 Instruction
*Def
= cast
<Instruction
>(D
);
3654 VDDef
.LocalNum
= InstrToDFSNum(D
);
3655 DFSOrderedSet
.push_back(VDDef
);
3656 // If there is a phi node equivalent, add it
3657 if (auto *PN
= RealToTemp
.lookup(Def
)) {
3659 dyn_cast_or_null
<PHIExpression
>(ValueToExpression
.lookup(Def
));
3661 VDDef
.Def
.setInt(false);
3662 VDDef
.Def
.setPointer(PN
);
3664 DFSOrderedSet
.push_back(VDDef
);
3668 unsigned int UseCount
= 0;
3669 // Now add the uses.
3670 for (auto &U
: Def
->uses()) {
3671 if (auto *I
= dyn_cast
<Instruction
>(U
.getUser())) {
3672 // Don't try to replace into dead uses
3673 if (InstructionsToErase
.count(I
))
3676 // Put the phi node uses in the incoming block.
3678 if (auto *P
= dyn_cast
<PHINode
>(I
)) {
3679 IBlock
= P
->getIncomingBlock(U
);
3680 // Make phi node users appear last in the incoming block
3682 VDUse
.LocalNum
= InstrDFS
.size() + 1;
3684 IBlock
= getBlockForValue(I
);
3685 VDUse
.LocalNum
= InstrToDFSNum(I
);
3688 // Skip uses in unreachable blocks, as we're going
3690 if (!ReachableBlocks
.contains(IBlock
))
3693 DomTreeNode
*DomNode
= DT
->getNode(IBlock
);
3694 VDUse
.DFSIn
= DomNode
->getDFSNumIn();
3695 VDUse
.DFSOut
= DomNode
->getDFSNumOut();
3698 DFSOrderedSet
.emplace_back(VDUse
);
3702 // If there are no uses, it's probably dead (but it may have side-effects,
3703 // so not definitely dead. Otherwise, store the number of uses so we can
3704 // track if it becomes dead later).
3706 ProbablyDead
.insert(Def
);
3708 UseCounts
[Def
] = UseCount
;
3712 // This function converts the set of members for a congruence class from values,
3713 // to the set of defs for loads and stores, with associated DFS info.
3714 void NewGVN::convertClassToLoadsAndStores(
3715 const CongruenceClass
&Dense
,
3716 SmallVectorImpl
<ValueDFS
> &LoadsAndStores
) const {
3717 for (auto *D
: Dense
) {
3718 if (!isa
<LoadInst
>(D
) && !isa
<StoreInst
>(D
))
3721 BasicBlock
*BB
= getBlockForValue(D
);
3723 DomTreeNode
*DomNode
= DT
->getNode(BB
);
3724 VD
.DFSIn
= DomNode
->getDFSNumIn();
3725 VD
.DFSOut
= DomNode
->getDFSNumOut();
3726 VD
.Def
.setPointer(D
);
3728 // If it's an instruction, use the real local dfs number.
3729 if (auto *I
= dyn_cast
<Instruction
>(D
))
3730 VD
.LocalNum
= InstrToDFSNum(I
);
3732 llvm_unreachable("Should have been an instruction");
3734 LoadsAndStores
.emplace_back(VD
);
3738 static void patchAndReplaceAllUsesWith(Instruction
*I
, Value
*Repl
) {
3739 patchReplacementInstruction(I
, Repl
);
3740 I
->replaceAllUsesWith(Repl
);
3743 void NewGVN::deleteInstructionsInBlock(BasicBlock
*BB
) {
3744 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB
);
3745 ++NumGVNBlocksDeleted
;
3747 // Delete the instructions backwards, as it has a reduced likelihood of having
3748 // to update as many def-use and use-def chains. Start after the terminator.
3749 auto StartPoint
= BB
->rbegin();
3751 // Note that we explicitly recalculate BB->rend() on each iteration,
3752 // as it may change when we remove the first instruction.
3753 for (BasicBlock::reverse_iterator
I(StartPoint
); I
!= BB
->rend();) {
3754 Instruction
&Inst
= *I
++;
3755 if (!Inst
.use_empty())
3756 Inst
.replaceAllUsesWith(PoisonValue::get(Inst
.getType()));
3757 if (isa
<LandingPadInst
>(Inst
))
3759 salvageKnowledge(&Inst
, AC
);
3761 Inst
.eraseFromParent();
3762 ++NumGVNInstrDeleted
;
3764 // Now insert something that simplifycfg will turn into an unreachable.
3765 Type
*Int8Ty
= Type::getInt8Ty(BB
->getContext());
3767 PoisonValue::get(Int8Ty
),
3768 Constant::getNullValue(PointerType::getUnqual(BB
->getContext())),
3769 BB
->getTerminator()->getIterator());
3772 void NewGVN::markInstructionForDeletion(Instruction
*I
) {
3773 LLVM_DEBUG(dbgs() << "Marking " << *I
<< " for deletion\n");
3774 InstructionsToErase
.insert(I
);
3777 void NewGVN::replaceInstruction(Instruction
*I
, Value
*V
) {
3778 LLVM_DEBUG(dbgs() << "Replacing " << *I
<< " with " << *V
<< "\n");
3779 patchAndReplaceAllUsesWith(I
, V
);
3780 // We save the actual erasing to avoid invalidating memory
3781 // dependencies until we are done with everything.
3782 markInstructionForDeletion(I
);
3787 // This is a stack that contains both the value and dfs info of where
3788 // that value is valid.
3789 class ValueDFSStack
{
3791 Value
*back() const { return ValueStack
.back(); }
3792 std::pair
<int, int> dfs_back() const { return DFSStack
.back(); }
3794 void push_back(Value
*V
, int DFSIn
, int DFSOut
) {
3795 ValueStack
.emplace_back(V
);
3796 DFSStack
.emplace_back(DFSIn
, DFSOut
);
3799 bool empty() const { return DFSStack
.empty(); }
3801 bool isInScope(int DFSIn
, int DFSOut
) const {
3804 return DFSIn
>= DFSStack
.back().first
&& DFSOut
<= DFSStack
.back().second
;
3807 void popUntilDFSScope(int DFSIn
, int DFSOut
) {
3809 // These two should always be in sync at this point.
3810 assert(ValueStack
.size() == DFSStack
.size() &&
3811 "Mismatch between ValueStack and DFSStack");
3813 !DFSStack
.empty() &&
3814 !(DFSIn
>= DFSStack
.back().first
&& DFSOut
<= DFSStack
.back().second
)) {
3815 DFSStack
.pop_back();
3816 ValueStack
.pop_back();
3821 SmallVector
<Value
*, 8> ValueStack
;
3822 SmallVector
<std::pair
<int, int>, 8> DFSStack
;
3825 } // end anonymous namespace
3827 // Given an expression, get the congruence class for it.
3828 CongruenceClass
*NewGVN::getClassForExpression(const Expression
*E
) const {
3829 if (auto *VE
= dyn_cast
<VariableExpression
>(E
))
3830 return ValueToClass
.lookup(VE
->getVariableValue());
3831 else if (isa
<DeadExpression
>(E
))
3833 return ExpressionToClass
.lookup(E
);
3836 // Given a value and a basic block we are trying to see if it is available in,
3837 // see if the value has a leader available in that block.
3838 Value
*NewGVN::findPHIOfOpsLeader(const Expression
*E
,
3839 const Instruction
*OrigInst
,
3840 const BasicBlock
*BB
) const {
3841 // It would already be constant if we could make it constant
3842 if (auto *CE
= dyn_cast
<ConstantExpression
>(E
))
3843 return CE
->getConstantValue();
3844 if (auto *VE
= dyn_cast
<VariableExpression
>(E
)) {
3845 auto *V
= VE
->getVariableValue();
3846 if (alwaysAvailable(V
) || DT
->dominates(getBlockForValue(V
), BB
))
3847 return VE
->getVariableValue();
3850 auto *CC
= getClassForExpression(E
);
3853 if (alwaysAvailable(CC
->getLeader()))
3854 return CC
->getLeader();
3856 for (auto *Member
: *CC
) {
3857 auto *MemberInst
= dyn_cast
<Instruction
>(Member
);
3858 if (MemberInst
== OrigInst
)
3860 // Anything that isn't an instruction is always available.
3863 if (DT
->dominates(getBlockForValue(MemberInst
), BB
))
3869 bool NewGVN::eliminateInstructions(Function
&F
) {
3870 // This is a non-standard eliminator. The normal way to eliminate is
3871 // to walk the dominator tree in order, keeping track of available
3872 // values, and eliminating them. However, this is mildly
3873 // pointless. It requires doing lookups on every instruction,
3874 // regardless of whether we will ever eliminate it. For
3875 // instructions part of most singleton congruence classes, we know we
3876 // will never eliminate them.
3878 // Instead, this eliminator looks at the congruence classes directly, sorts
3879 // them into a DFS ordering of the dominator tree, and then we just
3880 // perform elimination straight on the sets by walking the congruence
3881 // class member uses in order, and eliminate the ones dominated by the
3882 // last member. This is worst case O(E log E) where E = number of
3883 // instructions in a single congruence class. In theory, this is all
3884 // instructions. In practice, it is much faster, as most instructions are
3885 // either in singleton congruence classes or can't possibly be eliminated
3886 // anyway (if there are no overlapping DFS ranges in class).
3887 // When we find something not dominated, it becomes the new leader
3888 // for elimination purposes.
3889 // TODO: If we wanted to be faster, We could remove any members with no
3890 // overlapping ranges while sorting, as we will never eliminate anything
3891 // with those members, as they don't dominate anything else in our set.
3893 bool AnythingReplaced
= false;
3895 // Since we are going to walk the domtree anyway, and we can't guarantee the
3896 // DFS numbers are updated, we compute some ourselves.
3897 DT
->updateDFSNumbers();
3899 // Go through all of our phi nodes, and kill the arguments associated with
3900 // unreachable edges.
3901 auto ReplaceUnreachablePHIArgs
= [&](PHINode
*PHI
, BasicBlock
*BB
) {
3902 for (auto &Operand
: PHI
->incoming_values())
3903 if (!ReachableEdges
.count({PHI
->getIncomingBlock(Operand
), BB
})) {
3904 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
3906 << getBlockName(PHI
->getIncomingBlock(Operand
))
3907 << " with poison due to it being unreachable\n");
3908 Operand
.set(PoisonValue::get(PHI
->getType()));
3911 // Replace unreachable phi arguments.
3912 // At this point, RevisitOnReachabilityChange only contains:
3915 // 2. Temporaries that will convert to PHIs
3916 // 3. Operations that are affected by an unreachable edge but do not fit into
3918 // So it is a slight overshoot of what we want. We could make it exact by
3919 // using two SparseBitVectors per block.
3920 DenseMap
<const BasicBlock
*, unsigned> ReachablePredCount
;
3921 for (auto &KV
: ReachableEdges
)
3922 ReachablePredCount
[KV
.getEnd()]++;
3923 for (auto &BBPair
: RevisitOnReachabilityChange
) {
3924 for (auto InstNum
: BBPair
.second
) {
3925 auto *Inst
= InstrFromDFSNum(InstNum
);
3926 auto *PHI
= dyn_cast
<PHINode
>(Inst
);
3927 PHI
= PHI
? PHI
: dyn_cast_or_null
<PHINode
>(RealToTemp
.lookup(Inst
));
3930 auto *BB
= BBPair
.first
;
3931 if (ReachablePredCount
.lookup(BB
) != PHI
->getNumIncomingValues())
3932 ReplaceUnreachablePHIArgs(PHI
, BB
);
3936 // Map to store the use counts
3937 DenseMap
<const Value
*, unsigned int> UseCounts
;
3938 for (auto *CC
: reverse(CongruenceClasses
)) {
3939 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC
->getID()
3941 // Track the equivalent store info so we can decide whether to try
3942 // dead store elimination.
3943 SmallVector
<ValueDFS
, 8> PossibleDeadStores
;
3944 SmallPtrSet
<Instruction
*, 8> ProbablyDead
;
3945 if (CC
->isDead() || CC
->empty())
3947 // Everything still in the TOP class is unreachable or dead.
3948 if (CC
== TOPClass
) {
3949 for (auto *M
: *CC
) {
3950 auto *VTE
= ValueToExpression
.lookup(M
);
3951 if (VTE
&& isa
<DeadExpression
>(VTE
))
3952 markInstructionForDeletion(cast
<Instruction
>(M
));
3953 assert((!ReachableBlocks
.count(cast
<Instruction
>(M
)->getParent()) ||
3954 InstructionsToErase
.count(cast
<Instruction
>(M
))) &&
3955 "Everything in TOP should be unreachable or dead at this "
3961 assert(CC
->getLeader() && "We should have had a leader");
3962 // If this is a leader that is always available, and it's a
3963 // constant or has no equivalences, just replace everything with
3964 // it. We then update the congruence class with whatever members
3967 CC
->getStoredValue() ? CC
->getStoredValue() : CC
->getLeader();
3968 if (alwaysAvailable(Leader
)) {
3969 CongruenceClass::MemberSet MembersLeft
;
3970 for (auto *M
: *CC
) {
3972 // Void things have no uses we can replace.
3973 if (Member
== Leader
|| !isa
<Instruction
>(Member
) ||
3974 Member
->getType()->isVoidTy()) {
3975 MembersLeft
.insert(Member
);
3979 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader
) << " for "
3980 << *Member
<< "\n");
3981 auto *I
= cast
<Instruction
>(Member
);
3982 assert(Leader
!= I
&& "About to accidentally remove our leader");
3983 replaceInstruction(I
, Leader
);
3984 AnythingReplaced
= true;
3986 CC
->swap(MembersLeft
);
3988 // If this is a singleton, we can skip it.
3989 if (CC
->size() != 1 || RealToTemp
.count(Leader
)) {
3990 // This is a stack because equality replacement/etc may place
3991 // constants in the middle of the member list, and we want to use
3992 // those constant values in preference to the current leader, over
3993 // the scope of those constants.
3994 ValueDFSStack EliminationStack
;
3996 // Convert the members to DFS ordered sets and then merge them.
3997 SmallVector
<ValueDFS
, 8> DFSOrderedSet
;
3998 convertClassToDFSOrdered(*CC
, DFSOrderedSet
, UseCounts
, ProbablyDead
);
4000 // Sort the whole thing.
4001 llvm::sort(DFSOrderedSet
);
4002 for (auto &VD
: DFSOrderedSet
) {
4003 int MemberDFSIn
= VD
.DFSIn
;
4004 int MemberDFSOut
= VD
.DFSOut
;
4005 Value
*Def
= VD
.Def
.getPointer();
4006 bool FromStore
= VD
.Def
.getInt();
4008 // We ignore void things because we can't get a value from them.
4009 if (Def
&& Def
->getType()->isVoidTy())
4011 auto *DefInst
= dyn_cast_or_null
<Instruction
>(Def
);
4012 if (DefInst
&& AllTempInstructions
.count(DefInst
)) {
4013 auto *PN
= cast
<PHINode
>(DefInst
);
4015 // If this is a value phi and that's the expression we used, insert
4016 // it into the program
4017 // remove from temp instruction list.
4018 AllTempInstructions
.erase(PN
);
4019 auto *DefBlock
= getBlockForValue(Def
);
4020 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
4022 << getBlockName(getBlockForValue(Def
)) << "\n");
4023 PN
->insertBefore(&DefBlock
->front());
4025 NumGVNPHIOfOpsEliminations
++;
4028 if (EliminationStack
.empty()) {
4029 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
4031 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
4032 << EliminationStack
.dfs_back().first
<< ","
4033 << EliminationStack
.dfs_back().second
<< ")\n");
4036 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn
<< ","
4037 << MemberDFSOut
<< ")\n");
4038 // First, we see if we are out of scope or empty. If so,
4039 // and there equivalences, we try to replace the top of
4040 // stack with equivalences (if it's on the stack, it must
4041 // not have been eliminated yet).
4042 // Then we synchronize to our current scope, by
4043 // popping until we are back within a DFS scope that
4044 // dominates the current member.
4045 // Then, what happens depends on a few factors
4046 // If the stack is now empty, we need to push
4047 // If we have a constant or a local equivalence we want to
4048 // start using, we also push.
4049 // Otherwise, we walk along, processing members who are
4050 // dominated by this scope, and eliminate them.
4051 bool ShouldPush
= Def
&& EliminationStack
.empty();
4053 !EliminationStack
.isInScope(MemberDFSIn
, MemberDFSOut
);
4055 if (OutOfScope
|| ShouldPush
) {
4056 // Sync to our current scope.
4057 EliminationStack
.popUntilDFSScope(MemberDFSIn
, MemberDFSOut
);
4058 bool ShouldPush
= Def
&& EliminationStack
.empty();
4060 EliminationStack
.push_back(Def
, MemberDFSIn
, MemberDFSOut
);
4064 // Skip the Def's, we only want to eliminate on their uses. But mark
4065 // dominated defs as dead.
4067 // For anything in this case, what and how we value number
4068 // guarantees that any side-effects that would have occurred (ie
4069 // throwing, etc) can be proven to either still occur (because it's
4070 // dominated by something that has the same side-effects), or never
4071 // occur. Otherwise, we would not have been able to prove it value
4072 // equivalent to something else. For these things, we can just mark
4073 // it all dead. Note that this is different from the "ProbablyDead"
4074 // set, which may not be dominated by anything, and thus, are only
4075 // easy to prove dead if they are also side-effect free. Note that
4076 // because stores are put in terms of the stored value, we skip
4077 // stored values here. If the stored value is really dead, it will
4078 // still be marked for deletion when we process it in its own class.
4079 auto *DefI
= dyn_cast
<Instruction
>(Def
);
4080 if (!EliminationStack
.empty() && DefI
&& !FromStore
) {
4081 Value
*DominatingLeader
= EliminationStack
.back();
4082 if (DominatingLeader
!= Def
) {
4083 // Even if the instruction is removed, we still need to update
4084 // flags/metadata due to downstreams users of the leader.
4085 if (!match(DefI
, m_Intrinsic
<Intrinsic::ssa_copy
>()))
4086 patchReplacementInstruction(DefI
, DominatingLeader
);
4088 markInstructionForDeletion(DefI
);
4093 // At this point, we know it is a Use we are trying to possibly
4096 assert(isa
<Instruction
>(U
->get()) &&
4097 "Current def should have been an instruction");
4098 assert(isa
<Instruction
>(U
->getUser()) &&
4099 "Current user should have been an instruction");
4101 // If the thing we are replacing into is already marked to be dead,
4102 // this use is dead. Note that this is true regardless of whether
4103 // we have anything dominating the use or not. We do this here
4104 // because we are already walking all the uses anyway.
4105 Instruction
*InstUse
= cast
<Instruction
>(U
->getUser());
4106 if (InstructionsToErase
.count(InstUse
)) {
4107 auto &UseCount
= UseCounts
[U
->get()];
4108 if (--UseCount
== 0) {
4109 ProbablyDead
.insert(cast
<Instruction
>(U
->get()));
4113 // If we get to this point, and the stack is empty we must have a use
4114 // with nothing we can use to eliminate this use, so just skip it.
4115 if (EliminationStack
.empty())
4118 Value
*DominatingLeader
= EliminationStack
.back();
4120 auto *II
= dyn_cast
<IntrinsicInst
>(DominatingLeader
);
4121 bool isSSACopy
= II
&& II
->getIntrinsicID() == Intrinsic::ssa_copy
;
4123 DominatingLeader
= II
->getOperand(0);
4125 // Don't replace our existing users with ourselves.
4126 if (U
->get() == DominatingLeader
)
4129 // If we replaced something in an instruction, handle the patching of
4130 // metadata. Skip this if we are replacing predicateinfo with its
4131 // original operand, as we already know we can just drop it.
4132 auto *ReplacedInst
= cast
<Instruction
>(U
->get());
4133 auto *PI
= PredInfo
->getPredicateInfoFor(ReplacedInst
);
4134 if (!PI
|| DominatingLeader
!= PI
->OriginalOp
)
4135 patchReplacementInstruction(ReplacedInst
, DominatingLeader
);
4138 << "Found replacement " << *DominatingLeader
<< " for "
4139 << *U
->get() << " in " << *(U
->getUser()) << "\n");
4140 U
->set(DominatingLeader
);
4141 // This is now a use of the dominating leader, which means if the
4142 // dominating leader was dead, it's now live!
4143 auto &LeaderUseCount
= UseCounts
[DominatingLeader
];
4144 // It's about to be alive again.
4145 if (LeaderUseCount
== 0 && isa
<Instruction
>(DominatingLeader
))
4146 ProbablyDead
.erase(cast
<Instruction
>(DominatingLeader
));
4147 // For copy instructions, we use their operand as a leader,
4148 // which means we remove a user of the copy and it may become dead.
4150 auto It
= UseCounts
.find(II
);
4151 if (It
!= UseCounts
.end()) {
4152 unsigned &IIUseCount
= It
->second
;
4153 if (--IIUseCount
== 0)
4154 ProbablyDead
.insert(II
);
4158 AnythingReplaced
= true;
4163 // At this point, anything still in the ProbablyDead set is actually dead if
4164 // it would be trivially dead.
4165 for (auto *I
: ProbablyDead
)
4166 if (wouldInstructionBeTriviallyDead(I
))
4167 markInstructionForDeletion(I
);
4169 // Cleanup the congruence class.
4170 CongruenceClass::MemberSet MembersLeft
;
4171 for (auto *Member
: *CC
)
4172 if (!isa
<Instruction
>(Member
) ||
4173 !InstructionsToErase
.count(cast
<Instruction
>(Member
)))
4174 MembersLeft
.insert(Member
);
4175 CC
->swap(MembersLeft
);
4177 // If we have possible dead stores to look at, try to eliminate them.
4178 if (CC
->getStoreCount() > 0) {
4179 convertClassToLoadsAndStores(*CC
, PossibleDeadStores
);
4180 llvm::sort(PossibleDeadStores
);
4181 ValueDFSStack EliminationStack
;
4182 for (auto &VD
: PossibleDeadStores
) {
4183 int MemberDFSIn
= VD
.DFSIn
;
4184 int MemberDFSOut
= VD
.DFSOut
;
4185 Instruction
*Member
= cast
<Instruction
>(VD
.Def
.getPointer());
4186 if (EliminationStack
.empty() ||
4187 !EliminationStack
.isInScope(MemberDFSIn
, MemberDFSOut
)) {
4188 // Sync to our current scope.
4189 EliminationStack
.popUntilDFSScope(MemberDFSIn
, MemberDFSOut
);
4190 if (EliminationStack
.empty()) {
4191 EliminationStack
.push_back(Member
, MemberDFSIn
, MemberDFSOut
);
4195 // We already did load elimination, so nothing to do here.
4196 if (isa
<LoadInst
>(Member
))
4198 assert(!EliminationStack
.empty());
4199 Instruction
*Leader
= cast
<Instruction
>(EliminationStack
.back());
4201 assert(DT
->dominates(Leader
->getParent(), Member
->getParent()));
4202 // Member is dominated by Leader, and thus dead
4203 LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
4204 << " that is dominated by " << *Leader
<< "\n");
4205 markInstructionForDeletion(Member
);
4211 return AnythingReplaced
;
4214 // This function provides global ranking of operations so that we can place them
4215 // in a canonical order. Note that rank alone is not necessarily enough for a
4216 // complete ordering, as constants all have the same rank. However, generally,
4217 // we will simplify an operation with all constants so that it doesn't matter
4218 // what order they appear in.
4219 unsigned int NewGVN::getRank(const Value
*V
) const {
4220 // Prefer constants to undef to anything else
4221 // Undef is a constant, have to check it first.
4222 // Prefer poison to undef as it's less defined.
4223 // Prefer smaller constants to constantexprs
4224 // Note that the order here matters because of class inheritance
4225 if (isa
<ConstantExpr
>(V
))
4227 if (isa
<PoisonValue
>(V
))
4229 if (isa
<UndefValue
>(V
))
4231 if (isa
<Constant
>(V
))
4233 if (auto *A
= dyn_cast
<Argument
>(V
))
4234 return 4 + A
->getArgNo();
4236 // Need to shift the instruction DFS by number of arguments + 5 to account for
4237 // the constant and argument ranking above.
4238 unsigned Result
= InstrToDFSNum(V
);
4240 return 5 + NumFuncArgs
+ Result
;
4241 // Unreachable or something else, just return a really large number.
4245 // This is a function that says whether two commutative operations should
4246 // have their order swapped when canonicalizing.
4247 bool NewGVN::shouldSwapOperands(const Value
*A
, const Value
*B
) const {
4248 // Because we only care about a total ordering, and don't rewrite expressions
4249 // in this order, we order by rank, which will give a strict weak ordering to
4250 // everything but constants, and then we order by pointer address.
4251 return std::make_pair(getRank(A
), A
) > std::make_pair(getRank(B
), B
);
4254 bool NewGVN::shouldSwapOperandsForIntrinsic(const Value
*A
, const Value
*B
,
4255 const IntrinsicInst
*I
) const {
4256 auto LookupResult
= IntrinsicInstPred
.find(I
);
4257 if (shouldSwapOperands(A
, B
)) {
4258 if (LookupResult
== IntrinsicInstPred
.end())
4259 IntrinsicInstPred
.insert({I
, B
});
4261 LookupResult
->second
= B
;
4265 if (LookupResult
!= IntrinsicInstPred
.end()) {
4266 auto *SeenPredicate
= LookupResult
->second
;
4267 if (SeenPredicate
) {
4268 if (SeenPredicate
== B
)
4271 LookupResult
->second
= nullptr;
4277 PreservedAnalyses
NewGVNPass::run(Function
&F
, AnalysisManager
<Function
> &AM
) {
4278 // Apparently the order in which we get these results matter for
4279 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep
4280 // the same order here, just in case.
4281 auto &AC
= AM
.getResult
<AssumptionAnalysis
>(F
);
4282 auto &DT
= AM
.getResult
<DominatorTreeAnalysis
>(F
);
4283 auto &TLI
= AM
.getResult
<TargetLibraryAnalysis
>(F
);
4284 auto &AA
= AM
.getResult
<AAManager
>(F
);
4285 auto &MSSA
= AM
.getResult
<MemorySSAAnalysis
>(F
).getMSSA();
4287 NewGVN(F
, &DT
, &AC
, &TLI
, &AA
, &MSSA
, F
.getDataLayout())
4290 return PreservedAnalyses::all();
4291 PreservedAnalyses PA
;
4292 PA
.preserve
<DominatorTreeAnalysis
>();