1 //===- NewGVN.cpp - Global Value Numbering Pass ---------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 /// \file
10 /// This file implements LLVM's new Global Value Numbering pass.
11 /// GVN partitions values computed by a function into congruence classes.
12 /// Values ending up in the same congruence class are guaranteed to be the same
13 /// for every execution of the program. In that respect, congruency is a
14 /// compile-time approximation of equivalence of values at runtime.
15 /// The algorithm implemented here uses a sparse formulation and it's based
16 /// on the ideas described in the paper:
17 /// "A Sparse Algorithm for Predicated Global Value Numbering" from
18 /// Karthik Gargi.
19 ///
20 /// A brief overview of the algorithm: The algorithm is essentially the same as
21 /// the standard RPO value numbering algorithm (a good reference is the paper
22 /// "SCC based value numbering" by L. Taylor Simpson) with one major difference:
23 /// The RPO algorithm proceeds, on every iteration, to process every reachable
24 /// block and every instruction in that block. This is because the standard RPO
25 /// algorithm does not track what things have the same value number, it only
26 /// tracks what the value number of a given operation is (the mapping is
27 /// operation -> value number). Thus, when a value number of an operation
28 /// changes, it must reprocess everything to ensure all uses of a value number
29 /// get updated properly. In contrast, the sparse algorithm we use *also*
30 /// tracks what operations have a given value number (IE it also tracks the
31 /// reverse mapping from value number -> operations with that value number), so
32 /// that it only needs to reprocess the instructions that are affected when
33 /// something's value number changes. The vast majority of complexity and code
34 /// in this file is devoted to tracking what value numbers could change for what
35 /// instructions when various things happen. The rest of the algorithm is
36 /// devoted to performing symbolic evaluation, forward propagation, and
37 /// simplification of operations based on the value numbers deduced so far.
38 ///
39 /// In order to make the GVN mostly-complete, we use a technique derived from
40 /// "Detection of Redundant Expressions: A Complete and Polynomial-time
41 /// Algorithm in SSA" by R.R. Pai. The source of incompleteness in most SSA
42 /// based GVN algorithms is related to their inability to detect equivalence
43 /// between phi of ops (IE phi(a+b, c+d)) and op of phis (phi(a,c) + phi(b, d)).
44 /// We resolve this issue by generating the equivalent "phi of ops" form for
45 /// each op of phis we see, in a way that only takes polynomial time to resolve.
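///
/// For illustration, in the notation above: given
///   c = phi(a, b)
///   d = phi(e, f)
///   g = c + d
/// we build the equivalent phi of ops, phi(a + e, b + f), by translating the
/// operation into each predecessor, and then value number g against that phi.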
46 ///
47 /// We also do not perform elimination by using any published algorithm. All
48 /// published algorithms are O(Instructions). Instead, we use a technique that
49 /// is O(number of operations with the same value number), enabling us to skip
50 /// trying to eliminate things that have unique value numbers.
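///
/// As a rough sketch of the sparse scheme (illustrative names only; the real
/// state is the CongruenceClass member sets, the TouchedInstructions
/// bitvector, and the mark*Touched helpers declared below):
///
///   void valueNumberChanged(CongruenceClass *CC, BitVector &Touched,
///                           DenseMap<const Value *, unsigned> &InstrDFS) {
///     for (Value *Member : *CC)            // reverse map: class -> members
///       for (User *U : Member->users())    // only users can be affected
///         Touched.set(InstrDFS.lookup(U)); // queue them for reprocessing
///   }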
52 //===----------------------------------------------------------------------===//
54 #include "llvm/Transforms/Scalar/NewGVN.h"
55 #include "llvm/ADT/ArrayRef.h"
56 #include "llvm/ADT/BitVector.h"
57 #include "llvm/ADT/DenseMap.h"
58 #include "llvm/ADT/DenseMapInfo.h"
59 #include "llvm/ADT/DenseSet.h"
60 #include "llvm/ADT/DepthFirstIterator.h"
61 #include "llvm/ADT/GraphTraits.h"
62 #include "llvm/ADT/Hashing.h"
63 #include "llvm/ADT/PointerIntPair.h"
64 #include "llvm/ADT/PostOrderIterator.h"
65 #include "llvm/ADT/SmallPtrSet.h"
66 #include "llvm/ADT/SmallVector.h"
67 #include "llvm/ADT/SparseBitVector.h"
68 #include "llvm/ADT/Statistic.h"
69 #include "llvm/ADT/iterator_range.h"
70 #include "llvm/Analysis/AliasAnalysis.h"
71 #include "llvm/Analysis/AssumptionCache.h"
72 #include "llvm/Analysis/CFGPrinter.h"
73 #include "llvm/Analysis/ConstantFolding.h"
74 #include "llvm/Analysis/GlobalsModRef.h"
75 #include "llvm/Analysis/InstructionSimplify.h"
76 #include "llvm/Analysis/MemoryBuiltins.h"
77 #include "llvm/Analysis/MemorySSA.h"
78 #include "llvm/Analysis/TargetLibraryInfo.h"
79 #include "llvm/Transforms/Utils/Local.h"
80 #include "llvm/IR/Argument.h"
81 #include "llvm/IR/BasicBlock.h"
82 #include "llvm/IR/Constant.h"
83 #include "llvm/IR/Constants.h"
84 #include "llvm/IR/Dominators.h"
85 #include "llvm/IR/Function.h"
86 #include "llvm/IR/InstrTypes.h"
87 #include "llvm/IR/Instruction.h"
88 #include "llvm/IR/Instructions.h"
89 #include "llvm/IR/IntrinsicInst.h"
90 #include "llvm/IR/Intrinsics.h"
91 #include "llvm/IR/LLVMContext.h"
92 #include "llvm/IR/PatternMatch.h"
93 #include "llvm/IR/Type.h"
94 #include "llvm/IR/Use.h"
95 #include "llvm/IR/User.h"
96 #include "llvm/IR/Value.h"
97 #include "llvm/Pass.h"
98 #include "llvm/Support/Allocator.h"
99 #include "llvm/Support/ArrayRecycler.h"
100 #include "llvm/Support/Casting.h"
101 #include "llvm/Support/CommandLine.h"
102 #include "llvm/Support/Debug.h"
103 #include "llvm/Support/DebugCounter.h"
104 #include "llvm/Support/ErrorHandling.h"
105 #include "llvm/Support/PointerLikeTypeTraits.h"
106 #include "llvm/Support/raw_ostream.h"
107 #include "llvm/Transforms/Scalar.h"
108 #include "llvm/Transforms/Scalar/GVNExpression.h"
109 #include "llvm/Transforms/Utils/PredicateInfo.h"
110 #include "llvm/Transforms/Utils/VNCoercion.h"
111 #include <algorithm>
112 #include <cassert>
113 #include <cstdint>
114 #include <iterator>
115 #include <map>
116 #include <memory>
117 #include <set>
118 #include <string>
119 #include <tuple>
120 #include <utility>
121 #include <vector>
123 using namespace llvm;
124 using namespace llvm::GVNExpression;
125 using namespace llvm::VNCoercion;
126 using namespace llvm::PatternMatch;
128 #define DEBUG_TYPE "newgvn"
130 STATISTIC(NumGVNInstrDeleted, "Number of instructions deleted");
131 STATISTIC(NumGVNBlocksDeleted, "Number of blocks deleted");
132 STATISTIC(NumGVNOpsSimplified, "Number of Expressions simplified");
133 STATISTIC(NumGVNPhisAllSame, "Number of PHIs whose arguments are all the same");
134 STATISTIC(NumGVNMaxIterations,
135 "Maximum Number of iterations it took to converge GVN");
136 STATISTIC(NumGVNLeaderChanges, "Number of leader changes");
137 STATISTIC(NumGVNSortedLeaderChanges, "Number of sorted leader changes");
138 STATISTIC(NumGVNAvoidedSortedLeaderChanges,
139 "Number of avoided sorted leader changes");
140 STATISTIC(NumGVNDeadStores, "Number of redundant/dead stores eliminated");
141 STATISTIC(NumGVNPHIOfOpsCreated, "Number of PHI of ops created");
142 STATISTIC(NumGVNPHIOfOpsEliminations,
143 "Number of things eliminated using PHI of ops");
144 DEBUG_COUNTER(VNCounter, "newgvn-vn",
145 "Controls which instructions are value numbered");
146 DEBUG_COUNTER(PHIOfOpsCounter, "newgvn-phi",
147 "Controls which instructions we create phi of ops for");
148 // Currently store defining access refinement is too slow due to basicaa being
149 // egregiously slow. This flag lets us keep it working while we work on this
150 // issue.
151 static cl::opt<bool> EnableStoreRefinement("enable-store-refinement",
152 cl::init(false), cl::Hidden);
154 /// Currently, the generation of "phi of ops" can result in correctness issues.
155 static cl::opt<bool> EnablePhiOfOps("enable-phi-of-ops", cl::init(true),
156 cl::Hidden);
158 //===----------------------------------------------------------------------===//
159 // GVN Pass
160 //===----------------------------------------------------------------------===//
162 // Anchor methods.
163 namespace llvm {
164 namespace GVNExpression {
166 Expression::~Expression() = default;
167 BasicExpression::~BasicExpression() = default;
168 CallExpression::~CallExpression() = default;
169 LoadExpression::~LoadExpression() = default;
170 StoreExpression::~StoreExpression() = default;
171 AggregateValueExpression::~AggregateValueExpression() = default;
172 PHIExpression::~PHIExpression() = default;
174 } // end namespace GVNExpression
175 } // end namespace llvm
177 namespace {
179 // Tarjan's SCC finding algorithm with Nuutila's improvements
180 // SCCIterator is actually fairly complex for the simple thing we want.
181 // It also wants to hand us SCC's that are unrelated to the phi node we ask
182 // about, and have us process them there or risk redoing work.
183 // Graph traits over a filter iterator also doesn't work that well here.
184 // This SCC finder is specialized to walk use-def chains, and only follows
185 // instructions, not generic values (arguments, etc.).
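// A minimal usage sketch (illustrative only; NewGVN drives this through its
// SCCFinder member, e.g. when deciding whether an instruction is cycle free):
//   TarjanSCC SCCFinder;
//   SCCFinder.Start(PN);                     // walk PN's operand chain
//   const auto &Comp = SCCFinder.getComponentFor(PN);
//   bool InNonTrivialSCC = Comp.size() > 1;  // PN participates in a cycle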
187 struct TarjanSCC {
188 TarjanSCC() : Components(1) {}
190 void Start(const Instruction *Start) {
191 if (Root.lookup(Start) == 0)
192 FindSCC(Start);
195 const SmallPtrSetImpl<const Value *> &getComponentFor(const Value *V) const {
196 unsigned ComponentID = ValueToComponent.lookup(V);
198 assert(ComponentID > 0 &&
199 "Asking for a component for a value we never processed");
200 return Components[ComponentID];
203 private:
204 void FindSCC(const Instruction *I) {
205 Root[I] = ++DFSNum;
206 // Store the DFS Number we had before it possibly gets incremented.
207 unsigned int OurDFS = DFSNum;
208 for (auto &Op : I->operands()) {
209 if (auto *InstOp = dyn_cast<Instruction>(Op)) {
210 if (Root.lookup(Op) == 0)
211 FindSCC(InstOp);
212 if (!InComponent.count(Op))
213 Root[I] = std::min(Root.lookup(I), Root.lookup(Op));
216 // See if we really were the root of a component, by seeing if we still have
217 // our DFSNumber. If we do, we are the root of the component, and we have
218 // completed a component. If we do not, we are not the root of a component,
219 // and belong on the component stack.
220 if (Root.lookup(I) == OurDFS) {
221 unsigned ComponentID = Components.size();
222 Components.resize(Components.size() + 1);
223 auto &Component = Components.back();
224 Component.insert(I);
225 LLVM_DEBUG(dbgs() << "Component root is " << *I << "\n");
226 InComponent.insert(I);
227 ValueToComponent[I] = ComponentID;
228 // Pop a component off the stack and label it.
229 while (!Stack.empty() && Root.lookup(Stack.back()) >= OurDFS) {
230 auto *Member = Stack.back();
231 LLVM_DEBUG(dbgs() << "Component member is " << *Member << "\n");
232 Component.insert(Member);
233 InComponent.insert(Member);
234 ValueToComponent[Member] = ComponentID;
235 Stack.pop_back();
237 } else {
238 // Part of a component, push to stack
239 Stack.push_back(I);
243 unsigned int DFSNum = 1;
244 SmallPtrSet<const Value *, 8> InComponent;
245 DenseMap<const Value *, unsigned int> Root;
246 SmallVector<const Value *, 8> Stack;
248 // Store the components as vector of ptr sets, because we need the topo order
249 // of SCC's, but not individual member order
250 SmallVector<SmallPtrSet<const Value *, 8>, 8> Components;
252 DenseMap<const Value *, unsigned> ValueToComponent;
255 // Congruence classes represent the set of expressions/instructions
256 // that are all the same *during some scope in the function*.
257 // That is, because of the way we perform equality propagation, and
258 // because of memory value numbering, it is not correct to assume
259 // you can willy-nilly replace any member with any other at any
260 // point in the function.
262 // For any Value in the Member set, it is valid to replace any dominated member
263 // with that Value.
265 // Every congruence class has a leader, and the leader is used to symbolize
266 // instructions in a canonical way (IE every operand of an instruction that is a
267 // member of the same congruence class will always be replaced with the leader
268 // during symbolization). To simplify symbolization, we keep the leader as a
269 // constant if the class can be proved to be a constant value. Otherwise, the
270 // leader is the member of the value set with the smallest DFS number. Each
271 // congruence class also has a defining expression, though the expression may be
272 // null. If it exists, it can be used for forward propagation and reassociation
273 // of values.
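// For example, if a class can be proven to hold the constant value 42, its
// leader is the constant 42 and every use of a member is symbolized as 42;
// otherwise the leader is simply the earliest-defined (smallest DFS number)
// member.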
275 // For memory, we also track a representative MemoryAccess, and a set of memory
276 // members for MemoryPhis (which have no real instructions). Note that for
277 // memory, it seems tempting to try to split the memory members into a
278 // MemoryCongruenceClass or something. Unfortunately, this does not work
279 // easily. The value numbering of a given memory expression depends on the
280 // leader of the memory congruence class, and the leader of the memory congruence
281 // class depends on the value numbering of a given memory expression. This
282 // leads to wasted propagation, and in some cases, missed optimization. For
283 // example: If we had value numbered two stores together before, but now do not,
284 // we move them to a new value congruence class. This in turn moves at least
285 // one of the memorydefs to a new memory congruence class, which in turn affects
286 // the value numbering of the stores we just value numbered (because the memory
287 // congruence class is part of the value number). So while theoretically
288 // possible to split them up, it turns out to be *incredibly* complicated to get
289 // it to work right, because of the interdependency. While structurally
290 // slightly messier, it is algorithmically much simpler and faster to do what we
291 // do here, and track them both at once in the same class.
292 // Note: The default iterators for this class iterate over values
293 class CongruenceClass {
294 public:
295 using MemberType = Value;
296 using MemberSet = SmallPtrSet<MemberType *, 4>;
297 using MemoryMemberType = MemoryPhi;
298 using MemoryMemberSet = SmallPtrSet<const MemoryMemberType *, 2>;
300 explicit CongruenceClass(unsigned ID) : ID(ID) {}
301 CongruenceClass(unsigned ID, Value *Leader, const Expression *E)
302 : ID(ID), RepLeader(Leader), DefiningExpr(E) {}
304 unsigned getID() const { return ID; }
306 // True if this class has no members left. This is mainly used for assertion
307 // purposes, and for skipping empty classes.
308 bool isDead() const {
309 // If it's both dead from a value perspective, and dead from a memory
310 // perspective, it's really dead.
311 return empty() && memory_empty();
314 // Leader functions
315 Value *getLeader() const { return RepLeader; }
316 void setLeader(Value *Leader) { RepLeader = Leader; }
317 const std::pair<Value *, unsigned int> &getNextLeader() const {
318 return NextLeader;
320 void resetNextLeader() { NextLeader = {nullptr, ~0}; }
321 void addPossibleNextLeader(std::pair<Value *, unsigned int> LeaderPair) {
322 if (LeaderPair.second < NextLeader.second)
323 NextLeader = LeaderPair;
326 Value *getStoredValue() const { return RepStoredValue; }
327 void setStoredValue(Value *Leader) { RepStoredValue = Leader; }
328 const MemoryAccess *getMemoryLeader() const { return RepMemoryAccess; }
329 void setMemoryLeader(const MemoryAccess *Leader) { RepMemoryAccess = Leader; }
331 // Forward propagation info
332 const Expression *getDefiningExpr() const { return DefiningExpr; }
334 // Value member set
335 bool empty() const { return Members.empty(); }
336 unsigned size() const { return Members.size(); }
337 MemberSet::const_iterator begin() const { return Members.begin(); }
338 MemberSet::const_iterator end() const { return Members.end(); }
339 void insert(MemberType *M) { Members.insert(M); }
340 void erase(MemberType *M) { Members.erase(M); }
341 void swap(MemberSet &Other) { Members.swap(Other); }
343 // Memory member set
344 bool memory_empty() const { return MemoryMembers.empty(); }
345 unsigned memory_size() const { return MemoryMembers.size(); }
346 MemoryMemberSet::const_iterator memory_begin() const {
347 return MemoryMembers.begin();
349 MemoryMemberSet::const_iterator memory_end() const {
350 return MemoryMembers.end();
352 iterator_range<MemoryMemberSet::const_iterator> memory() const {
353 return make_range(memory_begin(), memory_end());
356 void memory_insert(const MemoryMemberType *M) { MemoryMembers.insert(M); }
357 void memory_erase(const MemoryMemberType *M) { MemoryMembers.erase(M); }
359 // Store count
360 unsigned getStoreCount() const { return StoreCount; }
361 void incStoreCount() { ++StoreCount; }
362 void decStoreCount() {
363 assert(StoreCount != 0 && "Store count went negative");
364 --StoreCount;
367 // True if this class has no memory members.
368 bool definesNoMemory() const { return StoreCount == 0 && memory_empty(); }
370 // Return true if two congruence classes are equivalent to each other. This
371 // means that every field other than the ID number and the dead field is equivalent.
372 bool isEquivalentTo(const CongruenceClass *Other) const {
373 if (!Other)
374 return false;
375 if (this == Other)
376 return true;
378 if (std::tie(StoreCount, RepLeader, RepStoredValue, RepMemoryAccess) !=
379 std::tie(Other->StoreCount, Other->RepLeader, Other->RepStoredValue,
380 Other->RepMemoryAccess))
381 return false;
382 if (DefiningExpr != Other->DefiningExpr)
383 if (!DefiningExpr || !Other->DefiningExpr ||
384 *DefiningExpr != *Other->DefiningExpr)
385 return false;
387 if (Members.size() != Other->Members.size())
388 return false;
390 return all_of(Members,
391 [&](const Value *V) { return Other->Members.count(V); });
394 private:
395 unsigned ID;
397 // Representative leader.
398 Value *RepLeader = nullptr;
400 // The most dominating leader after our current leader, because the member set
401 // is not sorted and is expensive to keep sorted all the time.
402 std::pair<Value *, unsigned int> NextLeader = {nullptr, ~0U};
404 // If this is represented by a store, the value of the store.
405 Value *RepStoredValue = nullptr;
407 // If this class contains MemoryDefs or MemoryPhis, this is the leading memory
408 // access.
409 const MemoryAccess *RepMemoryAccess = nullptr;
411 // Defining Expression.
412 const Expression *DefiningExpr = nullptr;
414 // Actual members of this class.
415 MemberSet Members;
417 // This is the set of MemoryPhis that exist in the class. MemoryDefs and
418 // MemoryUses have real instructions representing them, so we only need to
419 // track MemoryPhis here.
420 MemoryMemberSet MemoryMembers;
422 // Number of stores in this congruence class.
423 // This is used so we can detect store equivalence changes properly.
424 int StoreCount = 0;
427 } // end anonymous namespace
429 namespace llvm {
431 struct ExactEqualsExpression {
432 const Expression &E;
434 explicit ExactEqualsExpression(const Expression &E) : E(E) {}
436 hash_code getComputedHash() const { return E.getComputedHash(); }
438 bool operator==(const Expression &Other) const {
439 return E.exactlyEquals(Other);
443 template <> struct DenseMapInfo<const Expression *> {
444 static const Expression *getEmptyKey() {
445 auto Val = static_cast<uintptr_t>(-1);
446 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
447 return reinterpret_cast<const Expression *>(Val);
450 static const Expression *getTombstoneKey() {
451 auto Val = static_cast<uintptr_t>(~1U);
452 Val <<= PointerLikeTypeTraits<const Expression *>::NumLowBitsAvailable;
453 return reinterpret_cast<const Expression *>(Val);
456 static unsigned getHashValue(const Expression *E) {
457 return E->getComputedHash();
460 static unsigned getHashValue(const ExactEqualsExpression &E) {
461 return E.getComputedHash();
464 static bool isEqual(const ExactEqualsExpression &LHS, const Expression *RHS) {
465 if (RHS == getTombstoneKey() || RHS == getEmptyKey())
466 return false;
467 return LHS == *RHS;
470 static bool isEqual(const Expression *LHS, const Expression *RHS) {
471 if (LHS == RHS)
472 return true;
473 if (LHS == getTombstoneKey() || RHS == getTombstoneKey() ||
474 LHS == getEmptyKey() || RHS == getEmptyKey())
475 return false;
476 // Compare hashes before equality. This is *not* what the hashtable does,
477 // since it is computing it modulo the number of buckets, whereas we are
478 // using the full hash keyspace. Since the hashes are precomputed, this
479 // check is *much* faster than equality.
480 if (LHS->getComputedHash() != RHS->getComputedHash())
481 return false;
482 return *LHS == *RHS;
486 } // end namespace llvm
488 namespace {
490 class NewGVN {
491 Function &F;
492 DominatorTree *DT;
493 const TargetLibraryInfo *TLI;
494 AliasAnalysis *AA;
495 MemorySSA *MSSA;
496 MemorySSAWalker *MSSAWalker;
497 const DataLayout &DL;
498 std::unique_ptr<PredicateInfo> PredInfo;
500 // These are the only two things the create* functions should have
501 // side-effects on due to allocating memory.
502 mutable BumpPtrAllocator ExpressionAllocator;
503 mutable ArrayRecycler<Value *> ArgRecycler;
504 mutable TarjanSCC SCCFinder;
505 const SimplifyQuery SQ;
507 // Number of function arguments, used by ranking
508 unsigned int NumFuncArgs;
510 // RPOOrdering of basic blocks
511 DenseMap<const DomTreeNode *, unsigned> RPOOrdering;
513 // Congruence class info.
515 // This class is called INITIAL in the paper. It is the class everything
516 // starts out in, and represents any value. Being an optimistic analysis,
517 // anything in the TOP class has the value TOP, which is indeterminate and
518 // equivalent to everything.
519 CongruenceClass *TOPClass;
520 std::vector<CongruenceClass *> CongruenceClasses;
521 unsigned NextCongruenceNum;
523 // Value Mappings.
524 DenseMap<Value *, CongruenceClass *> ValueToClass;
525 DenseMap<Value *, const Expression *> ValueToExpression;
527 // Value PHI handling, used to make equivalence between phi(op, op) and
528 // op(phi, phi).
529 // These mappings just store various data that would normally be part of the
530 // IR.
531 SmallPtrSet<const Instruction *, 8> PHINodeUses;
533 DenseMap<const Value *, bool> OpSafeForPHIOfOps;
535 // Map a temporary instruction we created to a parent block.
536 DenseMap<const Value *, BasicBlock *> TempToBlock;
538 // Map between the already in-program instructions and the temporary phis we
539 // created that they are known equivalent to.
540 DenseMap<const Value *, PHINode *> RealToTemp;
542 // In order to know when we should re-process instructions that have
543 // phi-of-ops, we track the set of expressions that they needed as
544 // leaders. When we discover new leaders for those expressions, we process the
545 // associated phi-of-op instructions again in case they have changed. The
546 // other way they may change is if they had leaders, and those leaders
547 // disappear. However, at the point they have leaders, there are uses of the
548 // relevant operands in the created phi node, and so they will get reprocessed
549 // through the normal user marking we perform.
550 mutable DenseMap<const Value *, SmallPtrSet<Value *, 2>> AdditionalUsers;
551 DenseMap<const Expression *, SmallPtrSet<Instruction *, 2>>
552 ExpressionToPhiOfOps;
554 // Map from temporary operation to MemoryAccess.
555 DenseMap<const Instruction *, MemoryUseOrDef *> TempToMemory;
557 // Set of all temporary instructions we created.
558 // Note: This will include instructions that were just created during value
559 // numbering. The way to test if something is using them is to check
560 // RealToTemp.
561 DenseSet<Instruction *> AllTempInstructions;
563 // This is the set of instructions to revisit on a reachability change. At
564 // the end of the main iteration loop it will contain at least all the phi of
565 // ops instructions that will be changed to phis, as well as regular phis.
566 // During the iteration loop, it may contain other things, such as phi of ops
567 // instructions that used edge reachability to reach a result, and so need to
568 // be revisited when the edge changes, independent of whether the phi they
569 // depended on changes.
570 DenseMap<BasicBlock *, SparseBitVector<>> RevisitOnReachabilityChange;
572 // Mapping from predicate info we used to the instructions we used it with.
573 // In order to correctly ensure propagation, we must keep track of what
574 // comparisons we used, so that when the values of the comparisons change, we
575 // propagate the information to the places we used the comparison.
576 mutable DenseMap<const Value *, SmallPtrSet<Instruction *, 2>>
577 PredicateToUsers;
578 // Mapping from a memory access to the memory accesses that used it, kept for
579 // the same reasoning as PredicateToUsers. When we skip MemoryAccesses for
580 // stores, we no longer can rely solely on the def-use chains of MemorySSA.
581 mutable DenseMap<const MemoryAccess *, SmallPtrSet<MemoryAccess *, 2>>
582 MemoryToUsers;
584 // A table storing which memorydefs/phis represent a memory state provably
585 // equivalent to another memory state.
586 // We could use the congruence class machinery, but the MemoryAccess's are
587 // abstract memory states, so they can only ever be equivalent to each other,
588 // and not to constants, etc.
589 DenseMap<const MemoryAccess *, CongruenceClass *> MemoryAccessToClass;
591 // We could, if we wanted, build MemoryPhiExpressions and
592 // MemoryVariableExpressions, etc, and value number them the same way we value
593 // number phi expressions. For the moment, this seems like overkill. They
594 // can only exist in one of three states: they can be TOP (equal to
595 // everything), Equivalent to something else, or unique. Because we do not
596 // create expressions for them, we need to simulate leader change not just
597 // when they change class, but when they change state. Note: We can do the
598 // same thing for phis, and avoid having phi expressions if we wanted. We
599 // should eventually unify in one direction or the other, so this is a little
600 // bit of an experiment to see which turns out easier to maintain.
601 enum MemoryPhiState { MPS_Invalid, MPS_TOP, MPS_Equivalent, MPS_Unique };
602 DenseMap<const MemoryPhi *, MemoryPhiState> MemoryPhiState;
604 enum InstCycleState { ICS_Unknown, ICS_CycleFree, ICS_Cycle };
605 mutable DenseMap<const Instruction *, InstCycleState> InstCycleState;
607 // Expression to class mapping.
608 using ExpressionClassMap = DenseMap<const Expression *, CongruenceClass *>;
609 ExpressionClassMap ExpressionToClass;
611 // We have a single expression that represents all currently dead expressions.
612 // For dead expressions we can prove will stay dead, we mark them with
613 // DFS number zero. However, it's possible in the case of phi nodes
614 // for us to assume/prove all arguments are dead during fixpointing.
615 // We use DeadExpression for that case.
616 DeadExpression *SingletonDeadExpression = nullptr;
618 // Which values have changed as a result of leader changes.
619 SmallPtrSet<Value *, 8> LeaderChanges;
621 // Reachability info.
622 using BlockEdge = BasicBlockEdge;
623 DenseSet<BlockEdge> ReachableEdges;
624 SmallPtrSet<const BasicBlock *, 8> ReachableBlocks;
626 // This is a bitvector because, on larger functions, we may have
627 // thousands of touched instructions at once (entire blocks,
628 // instructions with hundreds of uses, etc). Even with optimization
629 // for when we mark whole blocks as touched, when this was a
630 // SmallPtrSet or DenseSet, for some functions, we spent >20% of all
631 // the time in GVN just managing this list. The bitvector, on the
632 // other hand, efficiently supports test/set/clear of both individual
633 // elements and ranges, as well as "find next element". This
634 // enables us to use it as a worklist with essentially 0 cost.
635 BitVector TouchedInstructions;
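// A sketch of the worklist pattern this enables (illustrative; the real loop
// is iterateTouchedInstructions() below):
//   for (int Idx = TouchedInstructions.find_first(); Idx != -1;
//        Idx = TouchedInstructions.find_next(Idx)) {
//     TouchedInstructions.reset(Idx);
//     // ...value number the instruction with DFS number Idx...
//   }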
637 DenseMap<const BasicBlock *, std::pair<unsigned, unsigned>> BlockInstRange;
639 #ifndef NDEBUG
640 // Debugging for how many times each block and instruction got processed.
641 DenseMap<const Value *, unsigned> ProcessedCount;
642 #endif
644 // DFS info.
645 // This contains a mapping from Instructions to DFS numbers.
646 // The numbering starts at 1. An instruction with DFS number zero
647 // means that the instruction is dead.
648 DenseMap<const Value *, unsigned> InstrDFS;
650 // This contains the mapping from DFS numbers to instructions.
651 SmallVector<Value *, 32> DFSToInstr;
653 // Deletion info.
654 SmallPtrSet<Instruction *, 8> InstructionsToErase;
656 public:
657 NewGVN(Function &F, DominatorTree *DT, AssumptionCache *AC,
658 TargetLibraryInfo *TLI, AliasAnalysis *AA, MemorySSA *MSSA,
659 const DataLayout &DL)
660 : F(F), DT(DT), TLI(TLI), AA(AA), MSSA(MSSA), DL(DL),
661 PredInfo(std::make_unique<PredicateInfo>(F, *DT, *AC)),
662 SQ(DL, TLI, DT, AC, /*CtxI=*/nullptr, /*UseInstrInfo=*/false) {}
664 bool runGVN();
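// A minimal sketch of how this class is driven (hypothetical call site; the
// pass wrapper constructs it from the analysis results in the same order as
// the constructor above):
//   NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout())
//       .runGVN();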
666 private:
667 // Expression handling.
668 const Expression *createExpression(Instruction *) const;
669 const Expression *createBinaryExpression(unsigned, Type *, Value *, Value *,
670 Instruction *) const;
672 // Our canonical form for phi arguments is a pair of incoming value, incoming
673 // basic block.
674 using ValPair = std::pair<Value *, BasicBlock *>;
676 PHIExpression *createPHIExpression(ArrayRef<ValPair>, const Instruction *,
677 BasicBlock *, bool &HasBackEdge,
678 bool &OriginalOpsConstant) const;
679 const DeadExpression *createDeadExpression() const;
680 const VariableExpression *createVariableExpression(Value *) const;
681 const ConstantExpression *createConstantExpression(Constant *) const;
682 const Expression *createVariableOrConstant(Value *V) const;
683 const UnknownExpression *createUnknownExpression(Instruction *) const;
684 const StoreExpression *createStoreExpression(StoreInst *,
685 const MemoryAccess *) const;
686 LoadExpression *createLoadExpression(Type *, Value *, LoadInst *,
687 const MemoryAccess *) const;
688 const CallExpression *createCallExpression(CallInst *,
689 const MemoryAccess *) const;
690 const AggregateValueExpression *
691 createAggregateValueExpression(Instruction *) const;
692 bool setBasicExpressionInfo(Instruction *, BasicExpression *) const;
694 // Congruence class handling.
695 CongruenceClass *createCongruenceClass(Value *Leader, const Expression *E) {
696 auto *result = new CongruenceClass(NextCongruenceNum++, Leader, E);
697 CongruenceClasses.emplace_back(result);
698 return result;
701 CongruenceClass *createMemoryClass(MemoryAccess *MA) {
702 auto *CC = createCongruenceClass(nullptr, nullptr);
703 CC->setMemoryLeader(MA);
704 return CC;
707 CongruenceClass *ensureLeaderOfMemoryClass(MemoryAccess *MA) {
708 auto *CC = getMemoryClass(MA);
709 if (CC->getMemoryLeader() != MA)
710 CC = createMemoryClass(MA);
711 return CC;
714 CongruenceClass *createSingletonCongruenceClass(Value *Member) {
715 CongruenceClass *CClass = createCongruenceClass(Member, nullptr);
716 CClass->insert(Member);
717 ValueToClass[Member] = CClass;
718 return CClass;
721 void initializeCongruenceClasses(Function &F);
722 const Expression *makePossiblePHIOfOps(Instruction *,
723 SmallPtrSetImpl<Value *> &);
724 Value *findLeaderForInst(Instruction *ValueOp,
725 SmallPtrSetImpl<Value *> &Visited,
726 MemoryAccess *MemAccess, Instruction *OrigInst,
727 BasicBlock *PredBB);
728 bool OpIsSafeForPHIOfOpsHelper(Value *V, const BasicBlock *PHIBlock,
729 SmallPtrSetImpl<const Value *> &Visited,
730 SmallVectorImpl<Instruction *> &Worklist);
731 bool OpIsSafeForPHIOfOps(Value *Op, const BasicBlock *PHIBlock,
732 SmallPtrSetImpl<const Value *> &);
733 void addPhiOfOps(PHINode *Op, BasicBlock *BB, Instruction *ExistingValue);
734 void removePhiOfOps(Instruction *I, PHINode *PHITemp);
736 // Value number an Instruction or MemoryPhi.
737 void valueNumberMemoryPhi(MemoryPhi *);
738 void valueNumberInstruction(Instruction *);
740 // Symbolic evaluation.
741 const Expression *checkSimplificationResults(Expression *, Instruction *,
742 Value *) const;
743 const Expression *performSymbolicEvaluation(Value *,
744 SmallPtrSetImpl<Value *> &) const;
745 const Expression *performSymbolicLoadCoercion(Type *, Value *, LoadInst *,
746 Instruction *,
747 MemoryAccess *) const;
748 const Expression *performSymbolicLoadEvaluation(Instruction *) const;
749 const Expression *performSymbolicStoreEvaluation(Instruction *) const;
750 const Expression *performSymbolicCallEvaluation(Instruction *) const;
751 void sortPHIOps(MutableArrayRef<ValPair> Ops) const;
752 const Expression *performSymbolicPHIEvaluation(ArrayRef<ValPair>,
753 Instruction *I,
754 BasicBlock *PHIBlock) const;
755 const Expression *performSymbolicAggrValueEvaluation(Instruction *) const;
756 const Expression *performSymbolicCmpEvaluation(Instruction *) const;
757 const Expression *performSymbolicPredicateInfoEvaluation(Instruction *) const;
759 // Congruence finding.
760 bool someEquivalentDominates(const Instruction *, const Instruction *) const;
761 Value *lookupOperandLeader(Value *) const;
762 CongruenceClass *getClassForExpression(const Expression *E) const;
763 void performCongruenceFinding(Instruction *, const Expression *);
764 void moveValueToNewCongruenceClass(Instruction *, const Expression *,
765 CongruenceClass *, CongruenceClass *);
766 void moveMemoryToNewCongruenceClass(Instruction *, MemoryAccess *,
767 CongruenceClass *, CongruenceClass *);
768 Value *getNextValueLeader(CongruenceClass *) const;
769 const MemoryAccess *getNextMemoryLeader(CongruenceClass *) const;
770 bool setMemoryClass(const MemoryAccess *From, CongruenceClass *To);
771 CongruenceClass *getMemoryClass(const MemoryAccess *MA) const;
772 const MemoryAccess *lookupMemoryLeader(const MemoryAccess *) const;
773 bool isMemoryAccessTOP(const MemoryAccess *) const;
775 // Ranking
776 unsigned int getRank(const Value *) const;
777 bool shouldSwapOperands(const Value *, const Value *) const;
779 // Reachability handling.
780 void updateReachableEdge(BasicBlock *, BasicBlock *);
781 void processOutgoingEdges(Instruction *, BasicBlock *);
782 Value *findConditionEquivalence(Value *) const;
784 // Elimination.
785 struct ValueDFS;
786 void convertClassToDFSOrdered(const CongruenceClass &,
787 SmallVectorImpl<ValueDFS> &,
788 DenseMap<const Value *, unsigned int> &,
789 SmallPtrSetImpl<Instruction *> &) const;
790 void convertClassToLoadsAndStores(const CongruenceClass &,
791 SmallVectorImpl<ValueDFS> &) const;
793 bool eliminateInstructions(Function &);
794 void replaceInstruction(Instruction *, Value *);
795 void markInstructionForDeletion(Instruction *);
796 void deleteInstructionsInBlock(BasicBlock *);
797 Value *findPHIOfOpsLeader(const Expression *, const Instruction *,
798 const BasicBlock *) const;
800 // New instruction creation.
801 void handleNewInstruction(Instruction *) {}
803 // Various instruction touch utilities
804 template <typename Map, typename KeyType, typename Func>
805 void for_each_found(Map &, const KeyType &, Func);
806 template <typename Map, typename KeyType>
807 void touchAndErase(Map &, const KeyType &);
808 void markUsersTouched(Value *);
809 void markMemoryUsersTouched(const MemoryAccess *);
810 void markMemoryDefTouched(const MemoryAccess *);
811 void markPredicateUsersTouched(Instruction *);
812 void markValueLeaderChangeTouched(CongruenceClass *CC);
813 void markMemoryLeaderChangeTouched(CongruenceClass *CC);
814 void markPhiOfOpsChanged(const Expression *E);
815 void addPredicateUsers(const PredicateBase *, Instruction *) const;
816 void addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const;
817 void addAdditionalUsers(Value *To, Value *User) const;
819 // Main loop of value numbering
820 void iterateTouchedInstructions();
822 // Utilities.
823 void cleanupTables();
824 std::pair<unsigned, unsigned> assignDFSNumbers(BasicBlock *, unsigned);
825 void updateProcessedCount(const Value *V);
826 void verifyMemoryCongruency() const;
827 void verifyIterationSettled(Function &F);
828 void verifyStoreExpressions() const;
829 bool singleReachablePHIPath(SmallPtrSet<const MemoryAccess *, 8> &,
830 const MemoryAccess *, const MemoryAccess *) const;
831 BasicBlock *getBlockForValue(Value *V) const;
832 void deleteExpression(const Expression *E) const;
833 MemoryUseOrDef *getMemoryAccess(const Instruction *) const;
834 MemoryAccess *getDefiningAccess(const MemoryAccess *) const;
835 MemoryPhi *getMemoryAccess(const BasicBlock *) const;
836 template <class T, class Range> T *getMinDFSOfRange(const Range &) const;
838 unsigned InstrToDFSNum(const Value *V) const {
839 assert(isa<Instruction>(V) && "This should not be used for MemoryAccesses");
840 return InstrDFS.lookup(V);
843 unsigned InstrToDFSNum(const MemoryAccess *MA) const {
844 return MemoryToDFSNum(MA);
847 Value *InstrFromDFSNum(unsigned DFSNum) { return DFSToInstr[DFSNum]; }
849 // Given a MemoryAccess, return the relevant instruction DFS number. Note:
850 // This deliberately takes a value so it can be used with Use's, which will
851 // auto-convert to Value's but not to MemoryAccess's.
852 unsigned MemoryToDFSNum(const Value *MA) const {
853 assert(isa<MemoryAccess>(MA) &&
854 "This should not be used with instructions");
855 return isa<MemoryUseOrDef>(MA)
856 ? InstrToDFSNum(cast<MemoryUseOrDef>(MA)->getMemoryInst())
857 : InstrDFS.lookup(MA);
860 bool isCycleFree(const Instruction *) const;
861 bool isBackedge(BasicBlock *From, BasicBlock *To) const;
863 // Debug counter info. When verifying, we have to reset the value numbering
864 // debug counter to the same state it started in to get the same results.
865 int64_t StartingVNCounter;
868 } // end anonymous namespace
870 template <typename T>
871 static bool equalsLoadStoreHelper(const T &LHS, const Expression &RHS) {
872 if (!isa<LoadExpression>(RHS) && !isa<StoreExpression>(RHS))
873 return false;
874 return LHS.MemoryExpression::equals(RHS);
877 bool LoadExpression::equals(const Expression &Other) const {
878 return equalsLoadStoreHelper(*this, Other);
881 bool StoreExpression::equals(const Expression &Other) const {
882 if (!equalsLoadStoreHelper(*this, Other))
883 return false;
884 // Make sure that store vs store includes the value operand.
885 if (const auto *S = dyn_cast<StoreExpression>(&Other))
886 if (getStoredValue() != S->getStoredValue())
887 return false;
888 return true;
891 // Determine if the edge From->To is a backedge
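// For example, in a natural loop the latch block has a larger RPO number than
// the header, so the latch->header edge (and any self edge, From == To) is
// treated as a backedge.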
892 bool NewGVN::isBackedge(BasicBlock *From, BasicBlock *To) const {
893 return From == To ||
894 RPOOrdering.lookup(DT->getNode(From)) >=
895 RPOOrdering.lookup(DT->getNode(To));
898 #ifndef NDEBUG
899 static std::string getBlockName(const BasicBlock *B) {
900 return DOTGraphTraits<const Function *>::getSimpleNodeLabel(B, nullptr);
902 #endif
904 // Get a MemoryAccess for an instruction, fake or real.
905 MemoryUseOrDef *NewGVN::getMemoryAccess(const Instruction *I) const {
906 auto *Result = MSSA->getMemoryAccess(I);
907 return Result ? Result : TempToMemory.lookup(I);
910 // Get a MemoryPhi for a basic block. These are all real.
911 MemoryPhi *NewGVN::getMemoryAccess(const BasicBlock *BB) const {
912 return MSSA->getMemoryAccess(BB);
915 // Get the basic block from an instruction/memory value.
916 BasicBlock *NewGVN::getBlockForValue(Value *V) const {
917 if (auto *I = dyn_cast<Instruction>(V)) {
918 auto *Parent = I->getParent();
919 if (Parent)
920 return Parent;
921 Parent = TempToBlock.lookup(V);
922 assert(Parent && "Every fake instruction should have a block");
923 return Parent;
926 auto *MP = dyn_cast<MemoryPhi>(V);
927 assert(MP && "Should have been an instruction or a MemoryPhi");
928 return MP->getBlock();
931 // Delete a definitely dead expression, so it can be reused by the expression
932 // allocator. Some of these are not in creation functions, so we have to accept
933 // const versions.
934 void NewGVN::deleteExpression(const Expression *E) const {
935 assert(isa<BasicExpression>(E));
936 auto *BE = cast<BasicExpression>(E);
937 const_cast<BasicExpression *>(BE)->deallocateOperands(ArgRecycler);
938 ExpressionAllocator.Deallocate(E);
941 // If V is a predicateinfo copy, get the thing it is a copy of.
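// For example, PredicateInfo materializes an ssa_copy intrinsic call that
// copies a value at program points where a predicate about it is known;
// looking through that copy lets the rest of the pass treat the copy as the
// original value.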
942 static Value *getCopyOf(const Value *V) {
943 if (auto *II = dyn_cast<IntrinsicInst>(V))
944 if (II->getIntrinsicID() == Intrinsic::ssa_copy)
945 return II->getOperand(0);
946 return nullptr;
949 // Return true if V is really PN, even accounting for predicateinfo copies.
950 static bool isCopyOfPHI(const Value *V, const PHINode *PN) {
951 return V == PN || getCopyOf(V) == PN;
954 static bool isCopyOfAPHI(const Value *V) {
955 auto *CO = getCopyOf(V);
956 return CO && isa<PHINode>(CO);
959 // Sort PHI Operands into a canonical order. What we use here is an RPO
960 // order. The BlockInstRange numbers are generated in an RPO walk of the basic
961 // blocks.
962 void NewGVN::sortPHIOps(MutableArrayRef<ValPair> Ops) const {
963 llvm::sort(Ops, [&](const ValPair &P1, const ValPair &P2) {
964 return BlockInstRange.lookup(P1.second).first <
965 BlockInstRange.lookup(P2.second).first;
969 // Return true if V is a value that will always be available (IE can
970 // be placed anywhere) in the function. We don't do globals here
971 // because they are often worse to put in place.
972 static bool alwaysAvailable(Value *V) {
973 return isa<Constant>(V) || isa<Argument>(V);
976 // Create a PHIExpression from an array of {incoming edge, value} pairs. I is
977 // the original instruction we are creating a PHIExpression for (but may not be
978 // a phi node). We require, as an invariant, that all the PHIOperands in the
979 // same block are sorted the same way. sortPHIOps will sort them into a
980 // canonical order.
981 PHIExpression *NewGVN::createPHIExpression(ArrayRef<ValPair> PHIOperands,
982 const Instruction *I,
983 BasicBlock *PHIBlock,
984 bool &HasBackedge,
985 bool &OriginalOpsConstant) const {
986 unsigned NumOps = PHIOperands.size();
987 auto *E = new (ExpressionAllocator) PHIExpression(NumOps, PHIBlock);
989 E->allocateOperands(ArgRecycler, ExpressionAllocator);
990 E->setType(PHIOperands.begin()->first->getType());
991 E->setOpcode(Instruction::PHI);
993 // Filter out unreachable phi operands.
994 auto Filtered = make_filter_range(PHIOperands, [&](const ValPair &P) {
995 auto *BB = P.second;
996 if (auto *PHIOp = dyn_cast<PHINode>(I))
997 if (isCopyOfPHI(P.first, PHIOp))
998 return false;
999 if (!ReachableEdges.count({BB, PHIBlock}))
1000 return false;
1001 // Things in TOPClass are equivalent to everything.
1002 if (ValueToClass.lookup(P.first) == TOPClass)
1003 return false;
1004 OriginalOpsConstant = OriginalOpsConstant && isa<Constant>(P.first);
1005 HasBackedge = HasBackedge || isBackedge(BB, PHIBlock);
1006 return lookupOperandLeader(P.first) != I;
1008 std::transform(Filtered.begin(), Filtered.end(), op_inserter(E),
1009 [&](const ValPair &P) -> Value * {
1010 return lookupOperandLeader(P.first);
1012 return E;
1015 // Set basic expression info (Arguments, type, opcode) for Expression
1016 // E from Instruction I in block B.
1017 bool NewGVN::setBasicExpressionInfo(Instruction *I, BasicExpression *E) const {
1018 bool AllConstant = true;
1019 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
1020 E->setType(GEP->getSourceElementType());
1021 else
1022 E->setType(I->getType());
1023 E->setOpcode(I->getOpcode());
1024 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1026 // Transform the operand array into an operand leader array, and keep track of
1027 // whether all members are constant.
1028 std::transform(I->op_begin(), I->op_end(), op_inserter(E), [&](Value *O) {
1029 auto Operand = lookupOperandLeader(O);
1030 AllConstant = AllConstant && isa<Constant>(Operand);
1031 return Operand;
1034 return AllConstant;
1037 const Expression *NewGVN::createBinaryExpression(unsigned Opcode, Type *T,
1038 Value *Arg1, Value *Arg2,
1039 Instruction *I) const {
1040 auto *E = new (ExpressionAllocator) BasicExpression(2);
1042 E->setType(T);
1043 E->setOpcode(Opcode);
1044 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1045 if (Instruction::isCommutative(Opcode)) {
1046 // Ensure that commutative instructions that only differ by a permutation
1047 // of their operands get the same value number by sorting the operand value
1048 // numbers. Since all commutative instructions have two operands it is more
1049 // efficient to sort by hand rather than using, say, std::sort.
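// For example, "add %a, %b" and "add %b, %a" end up with identical operand
// lists and therefore produce equal expressions.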
1050 if (shouldSwapOperands(Arg1, Arg2))
1051 std::swap(Arg1, Arg2);
1053 E->op_push_back(lookupOperandLeader(Arg1));
1054 E->op_push_back(lookupOperandLeader(Arg2));
1056 Value *V = SimplifyBinOp(Opcode, E->getOperand(0), E->getOperand(1), SQ);
1057 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1058 return SimplifiedE;
1059 return E;
1062 // Take a Value returned by simplification of Expression E/Instruction
1063 // I, and see if it resulted in a simpler expression. If so, return
1064 // that expression.
1065 const Expression *NewGVN::checkSimplificationResults(Expression *E,
1066 Instruction *I,
1067 Value *V) const {
1068 if (!V)
1069 return nullptr;
1070 if (auto *C = dyn_cast<Constant>(V)) {
1071 if (I)
1072 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1073 << " constant " << *C << "\n");
1074 NumGVNOpsSimplified++;
1075 assert(isa<BasicExpression>(E) &&
1076 "We should always have had a basic expression here");
1077 deleteExpression(E);
1078 return createConstantExpression(C);
1079 } else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
1080 if (I)
1081 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1082 << " variable " << *V << "\n");
1083 deleteExpression(E);
1084 return createVariableExpression(V);
1087 CongruenceClass *CC = ValueToClass.lookup(V);
1088 if (CC) {
1089 if (CC->getLeader() && CC->getLeader() != I) {
1090 // If we simplified to something else, we need to communicate
1091 // that we're users of the value we simplified to.
1092 if (I != V) {
1093 // Don't add temporary instructions to the user lists.
1094 if (!AllTempInstructions.count(I))
1095 addAdditionalUsers(V, I);
1097 return createVariableOrConstant(CC->getLeader());
1099 if (CC->getDefiningExpr()) {
1100 // If we simplified to something else, we need to communicate
1101 // that we're users of the value we simplified to.
1102 if (I != V) {
1103 // Don't add temporary instructions to the user lists.
1104 if (!AllTempInstructions.count(I))
1105 addAdditionalUsers(V, I);
1108 if (I)
1109 LLVM_DEBUG(dbgs() << "Simplified " << *I << " to "
1110 << " expression " << *CC->getDefiningExpr() << "\n");
1111 NumGVNOpsSimplified++;
1112 deleteExpression(E);
1113 return CC->getDefiningExpr();
1117 return nullptr;
1120 // Create a value expression from the instruction I, replacing operands with
1121 // their leaders.
1123 const Expression *NewGVN::createExpression(Instruction *I) const {
1124 auto *E = new (ExpressionAllocator) BasicExpression(I->getNumOperands());
1126 bool AllConstant = setBasicExpressionInfo(I, E);
1128 if (I->isCommutative()) {
1129 // Ensure that commutative instructions that only differ by a permutation
1130 // of their operands get the same value number by sorting the operand value
1131 // numbers. Since all commutative instructions have two operands it is more
1132 // efficient to sort by hand rather than using, say, std::sort.
1133 assert(I->getNumOperands() == 2 && "Unsupported commutative instruction!");
1134 if (shouldSwapOperands(E->getOperand(0), E->getOperand(1)))
1135 E->swapOperands(0, 1);
1137 // Perform simplification.
1138 if (auto *CI = dyn_cast<CmpInst>(I)) {
1139 // Sort the operand value numbers so x<y and y>x get the same value
1140 // number.
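// For example, "icmp slt %a, %b" and "icmp sgt %b, %a" are given the same
// canonical operand order with a swapped predicate, so they produce equal
// expressions.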
1141 CmpInst::Predicate Predicate = CI->getPredicate();
1142 if (shouldSwapOperands(E->getOperand(0), E->getOperand(1))) {
1143 E->swapOperands(0, 1);
1144 Predicate = CmpInst::getSwappedPredicate(Predicate);
1146 E->setOpcode((CI->getOpcode() << 8) | Predicate);
1147 // TODO: 25% of our time is spent in SimplifyCmpInst with pointer operands
1148 assert(I->getOperand(0)->getType() == I->getOperand(1)->getType() &&
1149 "Wrong types on cmp instruction");
1150 assert((E->getOperand(0)->getType() == I->getOperand(0)->getType() &&
1151 E->getOperand(1)->getType() == I->getOperand(1)->getType()));
1152 Value *V =
1153 SimplifyCmpInst(Predicate, E->getOperand(0), E->getOperand(1), SQ);
1154 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1155 return SimplifiedE;
1156 } else if (isa<SelectInst>(I)) {
1157 if (isa<Constant>(E->getOperand(0)) ||
1158 E->getOperand(1) == E->getOperand(2)) {
1159 assert(E->getOperand(1)->getType() == I->getOperand(1)->getType() &&
1160 E->getOperand(2)->getType() == I->getOperand(2)->getType());
1161 Value *V = SimplifySelectInst(E->getOperand(0), E->getOperand(1),
1162 E->getOperand(2), SQ);
1163 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1164 return SimplifiedE;
1166 } else if (I->isBinaryOp()) {
1167 Value *V =
1168 SimplifyBinOp(E->getOpcode(), E->getOperand(0), E->getOperand(1), SQ);
1169 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1170 return SimplifiedE;
1171 } else if (auto *CI = dyn_cast<CastInst>(I)) {
1172 Value *V =
1173 SimplifyCastInst(CI->getOpcode(), E->getOperand(0), CI->getType(), SQ);
1174 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1175 return SimplifiedE;
1176 } else if (isa<GetElementPtrInst>(I)) {
1177 Value *V = SimplifyGEPInst(
1178 E->getType(), ArrayRef<Value *>(E->op_begin(), E->op_end()), SQ);
1179 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1180 return SimplifiedE;
1181 } else if (AllConstant) {
1182 // We don't bother trying to simplify unless all of the operands
1183 // were constant.
1184 // TODO: There are a lot of Simplify*'s we could call here, if we
1185 // wanted to. The original motivating case for this code was a
1186 // zext i1 false to i8, which we don't have an interface to
1187 // simplify (IE there is no SimplifyZExt).
1189 SmallVector<Constant *, 8> C;
1190 for (Value *Arg : E->operands())
1191 C.emplace_back(cast<Constant>(Arg));
1193 if (Value *V = ConstantFoldInstOperands(I, C, DL, TLI))
1194 if (const Expression *SimplifiedE = checkSimplificationResults(E, I, V))
1195 return SimplifiedE;
1197 return E;
1200 const AggregateValueExpression *
1201 NewGVN::createAggregateValueExpression(Instruction *I) const {
1202 if (auto *II = dyn_cast<InsertValueInst>(I)) {
1203 auto *E = new (ExpressionAllocator)
1204 AggregateValueExpression(I->getNumOperands(), II->getNumIndices());
1205 setBasicExpressionInfo(I, E);
1206 E->allocateIntOperands(ExpressionAllocator);
1207 std::copy(II->idx_begin(), II->idx_end(), int_op_inserter(E));
1208 return E;
1209 } else if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1210 auto *E = new (ExpressionAllocator)
1211 AggregateValueExpression(I->getNumOperands(), EI->getNumIndices());
1212 setBasicExpressionInfo(EI, E);
1213 E->allocateIntOperands(ExpressionAllocator);
1214 std::copy(EI->idx_begin(), EI->idx_end(), int_op_inserter(E));
1215 return E;
1217 llvm_unreachable("Unhandled type of aggregate value operation");
1220 const DeadExpression *NewGVN::createDeadExpression() const {
1221 // DeadExpression has no arguments and all DeadExpression's are the same,
1222 // so we only need one of them.
1223 return SingletonDeadExpression;
1226 const VariableExpression *NewGVN::createVariableExpression(Value *V) const {
1227 auto *E = new (ExpressionAllocator) VariableExpression(V);
1228 E->setOpcode(V->getValueID());
1229 return E;
1232 const Expression *NewGVN::createVariableOrConstant(Value *V) const {
1233 if (auto *C = dyn_cast<Constant>(V))
1234 return createConstantExpression(C);
1235 return createVariableExpression(V);
1238 const ConstantExpression *NewGVN::createConstantExpression(Constant *C) const {
1239 auto *E = new (ExpressionAllocator) ConstantExpression(C);
1240 E->setOpcode(C->getValueID());
1241 return E;
1244 const UnknownExpression *NewGVN::createUnknownExpression(Instruction *I) const {
1245 auto *E = new (ExpressionAllocator) UnknownExpression(I);
1246 E->setOpcode(I->getOpcode());
1247 return E;
1250 const CallExpression *
1251 NewGVN::createCallExpression(CallInst *CI, const MemoryAccess *MA) const {
1252 // FIXME: Add operand bundles for calls.
1253 auto *E =
1254 new (ExpressionAllocator) CallExpression(CI->getNumOperands(), CI, MA);
1255 setBasicExpressionInfo(CI, E);
1256 return E;
1259 // Return true if some equivalent of instruction Inst dominates instruction U.
1260 bool NewGVN::someEquivalentDominates(const Instruction *Inst,
1261 const Instruction *U) const {
1262 auto *CC = ValueToClass.lookup(Inst);
1263 // This must be an instruction because we are only called from phi nodes
1264 // in the case that the value it needs to check against is an instruction.
1266 // The most likely candidates for dominance are the leader and the next leader.
1267 // The leader or nextleader will dominate in all cases where there is an
1268 // equivalent that is higher up in the dom tree.
1269 // We can't *only* check them, however, because the
1270 // dominator tree could have an infinite number of non-dominating siblings
1271 // with instructions that are in the right congruence class.
1272 // A
1273 // B C D E F G
1274 // |
1275 // H
1276 // Instruction U could be in H, with equivalents in every other sibling.
1277 // Depending on the rpo order picked, the leader could be the equivalent in
1278 // any of these siblings.
1279 if (!CC)
1280 return false;
1281 if (alwaysAvailable(CC->getLeader()))
1282 return true;
1283 if (DT->dominates(cast<Instruction>(CC->getLeader()), U))
1284 return true;
1285 if (CC->getNextLeader().first &&
1286 DT->dominates(cast<Instruction>(CC->getNextLeader().first), U))
1287 return true;
1288 return llvm::any_of(*CC, [&](const Value *Member) {
1289 return Member != CC->getLeader() &&
1290 DT->dominates(cast<Instruction>(Member), U);
1294 // See if we have a congruence class and leader for this operand, and if so,
1295 // return it. Otherwise, return the operand itself.
1296 Value *NewGVN::lookupOperandLeader(Value *V) const {
1297 CongruenceClass *CC = ValueToClass.lookup(V);
1298 if (CC) {
1299 // Everything in TOP is represented by undef, as it can be any value.
1300 // We do have to make sure we get the type right though, so we can't set the
1301 // RepLeader to undef.
1302 if (CC == TOPClass)
1303 return UndefValue::get(V->getType());
1304 return CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
1307 return V;
1310 const MemoryAccess *NewGVN::lookupMemoryLeader(const MemoryAccess *MA) const {
1311 auto *CC = getMemoryClass(MA);
1312 assert(CC->getMemoryLeader() &&
1313 "Every MemoryAccess should be mapped to a congruence class with a "
1314 "representative memory access");
1315 return CC->getMemoryLeader();
1318 // Return true if the MemoryAccess is really equivalent to everything. This is
1319 // equivalent to the lattice value "TOP" in most lattices. This is the initial
1320 // state of all MemoryAccesses.
1321 bool NewGVN::isMemoryAccessTOP(const MemoryAccess *MA) const {
1322 return getMemoryClass(MA) == TOPClass;
1325 LoadExpression *NewGVN::createLoadExpression(Type *LoadType, Value *PointerOp,
1326 LoadInst *LI,
1327 const MemoryAccess *MA) const {
1328 auto *E =
1329 new (ExpressionAllocator) LoadExpression(1, LI, lookupMemoryLeader(MA));
1330 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1331 E->setType(LoadType);
1333 // Give stores and loads the same opcode so they value number together.
1334 E->setOpcode(0);
1335 E->op_push_back(PointerOp);
1336 if (LI)
1337 E->setAlignment(MaybeAlign(LI->getAlignment()));
1339 // TODO: Value number heap versions. We may be able to discover
1340 // things alias analysis can't on its own (IE that a store and a
1341 // load have the same value, and thus, it isn't clobbering the load).
1342 return E;
1345 const StoreExpression *
1346 NewGVN::createStoreExpression(StoreInst *SI, const MemoryAccess *MA) const {
1347 auto *StoredValueLeader = lookupOperandLeader(SI->getValueOperand());
1348 auto *E = new (ExpressionAllocator)
1349 StoreExpression(SI->getNumOperands(), SI, StoredValueLeader, MA);
1350 E->allocateOperands(ArgRecycler, ExpressionAllocator);
1351 E->setType(SI->getValueOperand()->getType());
1353 // Give stores and loads the same opcode so they value number together.
1354 E->setOpcode(0);
1355 E->op_push_back(lookupOperandLeader(SI->getPointerOperand()));
1357 // TODO: Value number heap versions. We may be able to discover
1358 // things alias analysis can't on its own (IE that a store and a
1359 // load have the same value, and thus, it isn't clobbering the load).
1360 return E;
1363 const Expression *NewGVN::performSymbolicStoreEvaluation(Instruction *I) const {
1364 // Unlike loads, we never try to eliminate stores, so we do not check if they
1365 // are simple and avoid value numbering them.
1366 auto *SI = cast<StoreInst>(I);
1367 auto *StoreAccess = getMemoryAccess(SI);
1368 // Get the expression, if any, for the RHS of the MemoryDef.
1369 const MemoryAccess *StoreRHS = StoreAccess->getDefiningAccess();
1370 if (EnableStoreRefinement)
1371 StoreRHS = MSSAWalker->getClobberingMemoryAccess(StoreAccess);
1372 // If we bypassed the use-def chains, make sure we add a use.
1373 StoreRHS = lookupMemoryLeader(StoreRHS);
1374 if (StoreRHS != StoreAccess->getDefiningAccess())
1375 addMemoryUsers(StoreRHS, StoreAccess);
1376 // If we are defined by ourselves, use the live on entry def.
1377 if (StoreRHS == StoreAccess)
1378 StoreRHS = MSSA->getLiveOnEntryDef();
1380 if (SI->isSimple()) {
1381 // See if we are defined by a previous store expression, it already has a
1382 // value, and it's the same value as our current store. FIXME: Right now, we
1383 // only do this for simple stores, we should expand to cover memcpys, etc.
1384 const auto *LastStore = createStoreExpression(SI, StoreRHS);
1385 const auto *LastCC = ExpressionToClass.lookup(LastStore);
1386 // We really want to check whether the expression we matched was a store. No
1387 // easy way to do that. However, we can check that the class we found has a
1388 // store, which, assuming the value numbering state is not corrupt, is
1389 // sufficient, because we must also be equivalent to that store's expression
1390 // for our store to be in the same class.
1391 if (LastCC && LastCC->getStoredValue() == LastStore->getStoredValue())
1392 return LastStore;
1393 // Also check if our value operand is defined by a load of the same memory
1394 // location, and the memory state is the same as it was then (otherwise, it
1395 // could have been overwritten later. See test32 in
1396 // transforms/DeadStoreElimination/simple.ll).
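// For example (hypothetical IR):
//   %v = load i32, i32* %p        ; under memory state M
//   store i32 %v, i32* %p         ; also under memory state M
// The store writes back exactly the bytes that were loaded under the same
// memory state, so it does not create a new memory value.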
1397 if (auto *LI = dyn_cast<LoadInst>(LastStore->getStoredValue()))
1398 if ((lookupOperandLeader(LI->getPointerOperand()) ==
1399 LastStore->getOperand(0)) &&
1400 (lookupMemoryLeader(getMemoryAccess(LI)->getDefiningAccess()) ==
1401 StoreRHS))
1402 return LastStore;
1403 deleteExpression(LastStore);
1406 // If the store is not equivalent to anything, value number it as a store that
1407 // produces a unique memory state (instead of using its MemoryUse, we use
1408 // its MemoryDef).
1409 return createStoreExpression(SI, StoreAccess);
1412 // See if we can extract the value of a loaded pointer from a load, a store, or
1413 // a memory instruction.
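// For example (hypothetical IR):
//   store i32 7, i32* %p
//   %v = load i16, i16* %q        ; %q points into the bytes stored through %p
// If the analysis computes a non-negative offset of the load within the
// store, %v can be value numbered to a constant extracted from i32 7.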
1414 const Expression *
1415 NewGVN::performSymbolicLoadCoercion(Type *LoadType, Value *LoadPtr,
1416 LoadInst *LI, Instruction *DepInst,
1417 MemoryAccess *DefiningAccess) const {
1418 assert((!LI || LI->isSimple()) && "Not a simple load");
1419 if (auto *DepSI = dyn_cast<StoreInst>(DepInst)) {
1420 // Can't forward from non-atomic to atomic without violating memory model.
1421 // Also don't need to coerce if they are the same type, we will just
1422 // propagate.
1423 if (LI->isAtomic() > DepSI->isAtomic() ||
1424 LoadType == DepSI->getValueOperand()->getType())
1425 return nullptr;
1426 int Offset = analyzeLoadFromClobberingStore(LoadType, LoadPtr, DepSI, DL);
1427 if (Offset >= 0) {
1428 if (auto *C = dyn_cast<Constant>(
1429 lookupOperandLeader(DepSI->getValueOperand()))) {
1430 LLVM_DEBUG(dbgs() << "Coercing load from store " << *DepSI
1431 << " to constant " << *C << "\n");
1432 return createConstantExpression(
1433 getConstantStoreValueForLoad(C, Offset, LoadType, DL));
1436 } else if (auto *DepLI = dyn_cast<LoadInst>(DepInst)) {
1437 // Can't forward from non-atomic to atomic without violating memory model.
1438 if (LI->isAtomic() > DepLI->isAtomic())
1439 return nullptr;
1440 int Offset = analyzeLoadFromClobberingLoad(LoadType, LoadPtr, DepLI, DL);
1441 if (Offset >= 0) {
1442 // We can coerce a constant load into a load.
1443 if (auto *C = dyn_cast<Constant>(lookupOperandLeader(DepLI)))
1444 if (auto *PossibleConstant =
1445 getConstantLoadValueForLoad(C, Offset, LoadType, DL)) {
1446 LLVM_DEBUG(dbgs() << "Coercing load from load " << *LI
1447 << " to constant " << *PossibleConstant << "\n");
1448 return createConstantExpression(PossibleConstant);
1451 } else if (auto *DepMI = dyn_cast<MemIntrinsic>(DepInst)) {
1452 int Offset = analyzeLoadFromClobberingMemInst(LoadType, LoadPtr, DepMI, DL);
1453 if (Offset >= 0) {
1454 if (auto *PossibleConstant =
1455 getConstantMemInstValueForLoad(DepMI, Offset, LoadType, DL)) {
1456 LLVM_DEBUG(dbgs() << "Coercing load from meminst " << *DepMI
1457 << " to constant " << *PossibleConstant << "\n");
1458 return createConstantExpression(PossibleConstant);
1463 // All of the below are only true if the loaded pointer is produced
1464 // by the dependent instruction.
1465 if (LoadPtr != lookupOperandLeader(DepInst) &&
1466 !AA->isMustAlias(LoadPtr, DepInst))
1467 return nullptr;
1468 // If this load really doesn't depend on anything, then we must be loading an
1469 // undef value. This can happen when loading from a fresh allocation with no
1470 // intervening stores, for example. Note that this is only true in the case
1471 // that the result of the allocation is pointer equal to the load ptr.
1472 if (isa<AllocaInst>(DepInst) || isMallocLikeFn(DepInst, TLI)) {
1473 return createConstantExpression(UndefValue::get(LoadType));
1475 // If this load occurs right after a lifetime begin,
1476 // then the loaded value is undefined.
1477 else if (auto *II = dyn_cast<IntrinsicInst>(DepInst)) {
1478 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1479 return createConstantExpression(UndefValue::get(LoadType));
1481 // If this load follows a calloc (which zero initializes memory),
1482 // then the loaded value is zero
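// For example (hypothetical IR): a load of i32 from memory returned by
//   %m = call i8* @calloc(i64 1, i64 4)
// with no intervening store value numbers to the constant 0.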
1483 else if (isCallocLikeFn(DepInst, TLI)) {
1484 return createConstantExpression(Constant::getNullValue(LoadType));
1487 return nullptr;
1490 const Expression *NewGVN::performSymbolicLoadEvaluation(Instruction *I) const {
1491 auto *LI = cast<LoadInst>(I);
1493 // We can eliminate in favor of non-simple loads, but we won't be able to
1494 // eliminate the loads themselves.
1495 if (!LI->isSimple())
1496 return nullptr;
1498 Value *LoadAddressLeader = lookupOperandLeader(LI->getPointerOperand());
1499 // Load of undef is undef.
1500 if (isa<UndefValue>(LoadAddressLeader))
1501 return createConstantExpression(UndefValue::get(LI->getType()));
1502 MemoryAccess *OriginalAccess = getMemoryAccess(I);
1503 MemoryAccess *DefiningAccess =
1504 MSSAWalker->getClobberingMemoryAccess(OriginalAccess);
1506 if (!MSSA->isLiveOnEntryDef(DefiningAccess)) {
1507 if (auto *MD = dyn_cast<MemoryDef>(DefiningAccess)) {
1508 Instruction *DefiningInst = MD->getMemoryInst();
1509 // If the defining instruction is not reachable, replace with undef.
1510 if (!ReachableBlocks.count(DefiningInst->getParent()))
1511 return createConstantExpression(UndefValue::get(LI->getType()));
1512 // This will handle stores and memory insts. We only do this if the
1513 // defining access has a different type, or it is a pointer produced by
1514 // certain memory operations that cause the memory to have a fixed value
1515 // (IE things like calloc).
1516 if (const auto *CoercionResult =
1517 performSymbolicLoadCoercion(LI->getType(), LoadAddressLeader, LI,
1518 DefiningInst, DefiningAccess))
1519 return CoercionResult;
1523 const auto *LE = createLoadExpression(LI->getType(), LoadAddressLeader, LI,
1524 DefiningAccess);
1525 // If our MemoryLeader is not our defining access, add a use to the
1526 // MemoryLeader, so that we get reprocessed when it changes.
1527 if (LE->getMemoryLeader() != DefiningAccess)
1528 addMemoryUsers(LE->getMemoryLeader(), OriginalAccess);
1529 return LE;
1532 const Expression *
1533 NewGVN::performSymbolicPredicateInfoEvaluation(Instruction *I) const {
1534 auto *PI = PredInfo->getPredicateInfoFor(I);
1535 if (!PI)
1536 return nullptr;
1538 LLVM_DEBUG(dbgs() << "Found predicate info from instruction!\n");
1540 auto *PWC = dyn_cast<PredicateWithCondition>(PI);
1541 if (!PWC)
1542 return nullptr;
1544 auto *CopyOf = I->getOperand(0);
1545 auto *Cond = PWC->Condition;
1547 // If this is a copy of the condition, it must be either true or false depending
1548 // on the predicate info type and edge.
1549 if (CopyOf == Cond) {
1550 // We should not need to add predicate users because the predicate info is
1551 // already a use of this operand.
1552 if (isa<PredicateAssume>(PI))
1553 return createConstantExpression(ConstantInt::getTrue(Cond->getType()));
1554 if (auto *PBranch = dyn_cast<PredicateBranch>(PI)) {
1555 if (PBranch->TrueEdge)
1556 return createConstantExpression(ConstantInt::getTrue(Cond->getType()));
1557 return createConstantExpression(ConstantInt::getFalse(Cond->getType()));
1559 if (auto *PSwitch = dyn_cast<PredicateSwitch>(PI))
1560 return createConstantExpression(cast<Constant>(PSwitch->CaseValue));
1563 // Not a copy of the condition, so see what the predicates tell us about this
1564 // value. First, though, we check to make sure the value is actually a copy
1565 // of one of the condition operands. It's possible, in certain cases, for it
1566 // to be a copy of a predicateinfo copy. In particular, if two branch
1567 // operations use the same condition, and one branch dominates the other, we
1568 // will end up with a copy of a copy. This is currently a small deficiency in
1569 // predicateinfo. What will end up happening here is that we will value
1570 // number both copies the same anyway.
1572 // Everything below relies on the condition being a comparison.
1573 auto *Cmp = dyn_cast<CmpInst>(Cond);
1574 if (!Cmp)
1575 return nullptr;
1577 if (CopyOf != Cmp->getOperand(0) && CopyOf != Cmp->getOperand(1)) {
1578 LLVM_DEBUG(dbgs() << "Copy is not of any condition operands!\n");
1579 return nullptr;
1581 Value *FirstOp = lookupOperandLeader(Cmp->getOperand(0));
1582 Value *SecondOp = lookupOperandLeader(Cmp->getOperand(1));
1583 bool SwappedOps = false;
1584 // Sort the ops.
1585 if (shouldSwapOperands(FirstOp, SecondOp)) {
1586 std::swap(FirstOp, SecondOp);
1587 SwappedOps = true;
1589 CmpInst::Predicate Predicate =
1590 SwappedOps ? Cmp->getSwappedPredicate() : Cmp->getPredicate();
1592 if (isa<PredicateAssume>(PI)) {
1593 // If we assume the operands are equal, then they are equal.
1594 if (Predicate == CmpInst::ICMP_EQ) {
1595 addPredicateUsers(PI, I);
1596 addAdditionalUsers(SwappedOps ? Cmp->getOperand(1) : Cmp->getOperand(0),
1598 return createVariableOrConstant(FirstOp);
1601 if (const auto *PBranch = dyn_cast<PredicateBranch>(PI)) {
1602 // If we are *not* a copy of the comparison, we may be equal to the other
1603 // operand when the predicate implies something about equality of the
1604 // operands. In particular, if the comparison is true/false when the
1605 // operands are equal, and we are on the right edge, we know this operation
1606 // is equal to something.
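// For example (hypothetical IR): for "br i1 %c" with
//   %c = icmp eq i32 %x, %y
// along the true edge %x and %y are known equal, so an ssa.copy of either
// operand can be value numbered to the shared leader (and similarly along the
// false edge of an icmp ne).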
1607 if ((PBranch->TrueEdge && Predicate == CmpInst::ICMP_EQ) ||
1608 (!PBranch->TrueEdge && Predicate == CmpInst::ICMP_NE)) {
1609 addPredicateUsers(PI, I);
1610 addAdditionalUsers(SwappedOps ? Cmp->getOperand(1) : Cmp->getOperand(0),
1612 return createVariableOrConstant(FirstOp);
1614 // Handle the special case of floating point.
1615 if (((PBranch->TrueEdge && Predicate == CmpInst::FCMP_OEQ) ||
1616 (!PBranch->TrueEdge && Predicate == CmpInst::FCMP_UNE)) &&
1617 isa<ConstantFP>(FirstOp) && !cast<ConstantFP>(FirstOp)->isZero()) {
1618 addPredicateUsers(PI, I);
1619 addAdditionalUsers(SwappedOps ? Cmp->getOperand(1) : Cmp->getOperand(0),
1621 return createConstantExpression(cast<Constant>(FirstOp));
1624 return nullptr;
1627 // Evaluate read only and pure calls, and create an expression result.
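// For example (hypothetical IR): two calls "%a = call i32 @f(i32 %x)" and
// "%b = call i32 @f(i32 %x)" where @f only reads memory produce equal call
// expressions when they see the same defining MemoryAccess; a call that does
// not access memory at all ignores the memory state entirely.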
1628 const Expression *NewGVN::performSymbolicCallEvaluation(Instruction *I) const {
1629 auto *CI = cast<CallInst>(I);
1630 if (auto *II = dyn_cast<IntrinsicInst>(I)) {
1631 // Intrinsics with the returned attribute are copies of arguments.
1632 if (auto *ReturnedValue = II->getReturnedArgOperand()) {
1633 if (II->getIntrinsicID() == Intrinsic::ssa_copy)
1634 if (const auto *Result = performSymbolicPredicateInfoEvaluation(I))
1635 return Result;
1636 return createVariableOrConstant(ReturnedValue);
1639 if (AA->doesNotAccessMemory(CI)) {
1640 return createCallExpression(CI, TOPClass->getMemoryLeader());
1641 } else if (AA->onlyReadsMemory(CI)) {
1642 if (auto *MA = MSSA->getMemoryAccess(CI)) {
1643 auto *DefiningAccess = MSSAWalker->getClobberingMemoryAccess(MA);
1644 return createCallExpression(CI, DefiningAccess);
1645 } else // MSSA determined that CI does not access memory.
1646 return createCallExpression(CI, TOPClass->getMemoryLeader());
1648 return nullptr;
1651 // Retrieve the memory class for a given MemoryAccess.
1652 CongruenceClass *NewGVN::getMemoryClass(const MemoryAccess *MA) const {
1653 auto *Result = MemoryAccessToClass.lookup(MA);
1654 assert(Result && "Should have found memory class");
1655 return Result;
1658 // Update the MemoryAccess equivalence table to say that From maps to NewClass,
1659 // and return true if this is different from what already existed in the table.
1660 bool NewGVN::setMemoryClass(const MemoryAccess *From,
1661 CongruenceClass *NewClass) {
1662 assert(NewClass &&
1663 "Every MemoryAccess should be getting mapped to a non-null class");
1664 LLVM_DEBUG(dbgs() << "Setting " << *From);
1665 LLVM_DEBUG(dbgs() << " equivalent to congruence class ");
1666 LLVM_DEBUG(dbgs() << NewClass->getID()
1667 << " with current MemoryAccess leader ");
1668 LLVM_DEBUG(dbgs() << *NewClass->getMemoryLeader() << "\n");
1670 auto LookupResult = MemoryAccessToClass.find(From);
1671 bool Changed = false;
1672 // If it's already in the table, see if the value changed.
1673 if (LookupResult != MemoryAccessToClass.end()) {
1674 auto *OldClass = LookupResult->second;
1675 if (OldClass != NewClass) {
1676 // If this is a phi, we have to handle memory member updates.
1677 if (auto *MP = dyn_cast<MemoryPhi>(From)) {
1678 OldClass->memory_erase(MP);
1679 NewClass->memory_insert(MP);
1680 // This may have killed the class if it had no non-memory members
1681 if (OldClass->getMemoryLeader() == From) {
1682 if (OldClass->definesNoMemory()) {
1683 OldClass->setMemoryLeader(nullptr);
1684 } else {
1685 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
1686 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
1687 << OldClass->getID() << " to "
1688 << *OldClass->getMemoryLeader()
1689 << " due to removal of a memory member " << *From
1690 << "\n");
1691 markMemoryLeaderChangeTouched(OldClass);
1695 // It wasn't equivalent before, and now it is.
1696 LookupResult->second = NewClass;
1697 Changed = true;
1701 return Changed;
1704 // Determine if an instruction is cycle-free. That means the values in the
1705 // instruction don't depend on any expressions that can change value as a result
1706 // of the instruction. For example, a non-cycle free instruction would be v =
1707 // phi(0, v+1).
1708 bool NewGVN::isCycleFree(const Instruction *I) const {
1709 // In order to compute cycle-freeness, we do SCC finding on the instruction,
1710 // and see what kind of SCC it ends up in. If it is a singleton, it is
1711 // cycle-free. If it is not in a singleton, it is only cycle free if the
1712 // other members are all phi nodes (as they do not compute anything, they are
1713 // copies).
1714 auto ICS = InstCycleState.lookup(I);
1715 if (ICS == ICS_Unknown) {
1716 SCCFinder.Start(I);
1717 auto &SCC = SCCFinder.getComponentFor(I);
1718 // It's cycle free if its size is 1 or the SCC is *only* phi nodes.
1719 if (SCC.size() == 1)
1720 InstCycleState.insert({I, ICS_CycleFree});
1721 else {
1722 bool AllPhis = llvm::all_of(SCC, [](const Value *V) {
1723 return isa<PHINode>(V) || isCopyOfAPHI(V);
1725 ICS = AllPhis ? ICS_CycleFree : ICS_Cycle;
1726 for (auto *Member : SCC)
1727 if (auto *MemberPhi = dyn_cast<PHINode>(Member))
1728 InstCycleState.insert({MemberPhi, ICS});
1731 if (ICS == ICS_Cycle)
1732 return false;
1733 return true;
1736 // Evaluate PHI nodes symbolically and create an expression result.
1737 const Expression *
1738 NewGVN::performSymbolicPHIEvaluation(ArrayRef<ValPair> PHIOps,
1739 Instruction *I,
1740 BasicBlock *PHIBlock) const {
1741 // True if one of the incoming phi edges is a backedge.
1742 bool HasBackedge = false;
1743 // OriginalOpsConstant tracks whether all the *original* phi operands were
1744 // constants. This is really shorthand for "this phi cannot cycle due to
1745 // forward propagation", since any change in value of the phi is guaranteed
1746 // not to later change the value of the phi. IE it can't be v = phi(undef, v+1)
1747 bool OriginalOpsConstant = true;
1748 auto *E = cast<PHIExpression>(createPHIExpression(
1749 PHIOps, I, PHIBlock, HasBackedge, OriginalOpsConstant));
1750 // We match the semantics of SimplifyPhiNode from InstructionSimplify here.
1751 // See if all arguments are the same.
1752 // We track if any were undef because they need special handling.
1753 bool HasUndef = false;
1754 auto Filtered = make_filter_range(E->operands(), [&](Value *Arg) {
1755 if (isa<UndefValue>(Arg)) {
1756 HasUndef = true;
1757 return false;
1759 return true;
1761 // If we are left with no operands, it's dead.
1762 if (Filtered.empty()) {
1763 // If it has undef at this point, it means there are no non-undef arguments,
1764 // and thus, the value of the phi node must be undef.
1765 if (HasUndef) {
1766 LLVM_DEBUG(
1767 dbgs() << "PHI Node " << *I
1768 << " has no non-undef arguments, valuing it as undef\n");
1769 return createConstantExpression(UndefValue::get(I->getType()));
1772 LLVM_DEBUG(dbgs() << "No arguments of PHI node " << *I << " are live\n");
1773 deleteExpression(E);
1774 return createDeadExpression();
1776 Value *AllSameValue = *(Filtered.begin());
1777 ++Filtered.begin();
1778 // Can't use std::equal here, sadly, because filter.begin moves.
1779 if (llvm::all_of(Filtered, [&](Value *Arg) { return Arg == AllSameValue; })) {
1780 // In LLVM's non-standard representation of phi nodes, it's possible to have
1781 // phi nodes with cycles (IE dependent on other phis that are .... dependent
1782 // on the original phi node), especially in weird CFGs where some arguments
1783 // are unreachable, or uninitialized along certain paths. This can cause
1784 // infinite loops during evaluation. We work around this by not trying to
1785 // really evaluate them independently, but instead using a variable
1786 // expression to say if one is equivalent to the other.
1787 // We also special case undef, so that if we have an undef, we can't use the
1788 // common value unless it dominates the phi block.
1789 if (HasUndef) {
1790 // If we have undef and at least one other value, this is really a
1791 // multivalued phi, and we need to know if it's cycle free in order to
1792 // evaluate whether we can ignore the undef. The other parts of this are
1793 // just shortcuts. If there is no backedge, or all operands are
1794 // constants, it also must be cycle free.
1795 if (HasBackedge && !OriginalOpsConstant &&
1796 !isa<UndefValue>(AllSameValue) && !isCycleFree(I))
1797 return E;
1799 // Only have to check for instructions
1800 if (auto *AllSameInst = dyn_cast<Instruction>(AllSameValue))
1801 if (!someEquivalentDominates(AllSameInst, I))
1802 return E;
1804 // Can't simplify to something that comes later in the iteration.
1805 // Otherwise, when and if it changes congruence class, we will never catch
1806 // up. We will always be a class behind it.
1807 if (isa<Instruction>(AllSameValue) &&
1808 InstrToDFSNum(AllSameValue) > InstrToDFSNum(I))
1809 return E;
1810 NumGVNPhisAllSame++;
1811 LLVM_DEBUG(dbgs() << "Simplified PHI node " << *I << " to " << *AllSameValue
1812 << "\n");
1813 deleteExpression(E);
1814 return createVariableOrConstant(AllSameValue);
1816 return E;
1819 const Expression *
1820 NewGVN::performSymbolicAggrValueEvaluation(Instruction *I) const {
1821 if (auto *EI = dyn_cast<ExtractValueInst>(I)) {
1822 auto *WO = dyn_cast<WithOverflowInst>(EI->getAggregateOperand());
1823 if (WO && EI->getNumIndices() == 1 && *EI->idx_begin() == 0)
1824 // EI is an extract from one of our with.overflow intrinsics. Synthesize
1825 // a semantically equivalent expression instead of an extract value
1826 // expression.
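// For example (hypothetical IR):
//   %s = call { i32, i1 } @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
//   %v = extractvalue { i32, i1 } %s, 0
// %v is value numbered as the binary expression "add i32 %a, %b", so it can
// be found equal to an ordinary add of the same operands.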
1827 return createBinaryExpression(WO->getBinaryOp(), EI->getType(),
1828 WO->getLHS(), WO->getRHS(), I);
1831 return createAggregateValueExpression(I);
1834 const Expression *NewGVN::performSymbolicCmpEvaluation(Instruction *I) const {
1835 assert(isa<CmpInst>(I) && "Expected a cmp instruction.");
1837 auto *CI = cast<CmpInst>(I);
1838 // See if our operands are equal to those of a previous predicate, and if so,
1839 // if it implies true or false.
1840 auto Op0 = lookupOperandLeader(CI->getOperand(0));
1841 auto Op1 = lookupOperandLeader(CI->getOperand(1));
1842 auto OurPredicate = CI->getPredicate();
1843 if (shouldSwapOperands(Op0, Op1)) {
1844 std::swap(Op0, Op1);
1845 OurPredicate = CI->getSwappedPredicate();
1848 // Avoid processing the same info twice.
1849 const PredicateBase *LastPredInfo = nullptr;
1850 // See if we know something about the comparison itself, like it is the target
1851 // of an assume.
1852 auto *CmpPI = PredInfo->getPredicateInfoFor(I);
1853 if (dyn_cast_or_null<PredicateAssume>(CmpPI))
1854 return createConstantExpression(ConstantInt::getTrue(CI->getType()));
1856 if (Op0 == Op1) {
1857 // This condition does not depend on predicates, no need to add users
1858 if (CI->isTrueWhenEqual())
1859 return createConstantExpression(ConstantInt::getTrue(CI->getType()));
1860 else if (CI->isFalseWhenEqual())
1861 return createConstantExpression(ConstantInt::getFalse(CI->getType()));
1864 // NOTE: Because we are comparing both operands here and below, and using
1865 // previous comparisons, we rely on the fact that predicateinfo knows to mark
1866 // comparisons that use renamed operands as users of the earlier comparisons.
1867 // It is *not* enough to just mark predicateinfo renamed operands as users of
1868 // the earlier comparisons, because the *other* operand may have changed in a
1869 // previous iteration.
1870 // Example:
1871 // icmp slt %a, %b
1872 // %b.0 = ssa.copy(%b)
1873 // false branch:
1874 // icmp slt %c, %b.0
1876 // %c and %a may start out equal, and thus, the code below will say the second
1877 // icmp is false. %c may become equal to something else, and in that case the
1878 // second icmp *must* be reexamined, but would not be if only the renamed
1879 // operands are considered users of the icmp.
1881 // *Currently* we only check one level of comparisons back, and only mark one
1882 // level back as touched when changes happen. If you modify this code to look
1883 // back farther through comparisons, you *must* mark the appropriate
1884 // comparisons as users in PredicateInfo.cpp, or you will cause bugs.
1885 // See if we know something just from the operands themselves.
1887 // See if our operands have predicate info, so that we may be able to derive
1888 // something from a previous comparison.
1889 for (const auto &Op : CI->operands()) {
1890 auto *PI = PredInfo->getPredicateInfoFor(Op);
1891 if (const auto *PBranch = dyn_cast_or_null<PredicateBranch>(PI)) {
1892 if (PI == LastPredInfo)
1893 continue;
1894 LastPredInfo = PI;
1895 // In phi of ops cases, we may have predicate info that we are evaluating
1896 // in a different context.
1897 if (!DT->dominates(PBranch->To, getBlockForValue(I)))
1898 continue;
1899 // TODO: Along the false edge, we may know more things too, like
1900 // icmp of same operands is false.
1902 // TODO: We only handle actual comparison conditions below, not
1903 // and/or.
1904 auto *BranchCond = dyn_cast<CmpInst>(PBranch->Condition);
1905 if (!BranchCond)
1906 continue;
1907 auto *BranchOp0 = lookupOperandLeader(BranchCond->getOperand(0));
1908 auto *BranchOp1 = lookupOperandLeader(BranchCond->getOperand(1));
1909 auto BranchPredicate = BranchCond->getPredicate();
1910 if (shouldSwapOperands(BranchOp0, BranchOp1)) {
1911 std::swap(BranchOp0, BranchOp1);
1912 BranchPredicate = BranchCond->getSwappedPredicate();
1914 if (BranchOp0 == Op0 && BranchOp1 == Op1) {
1915 if (PBranch->TrueEdge) {
1916 // If we know the previous predicate is true and we are in the true
1917 // edge then we may be implied true or false.
1918 if (CmpInst::isImpliedTrueByMatchingCmp(BranchPredicate,
1919 OurPredicate)) {
1920 addPredicateUsers(PI, I);
1921 return createConstantExpression(
1922 ConstantInt::getTrue(CI->getType()));
1925 if (CmpInst::isImpliedFalseByMatchingCmp(BranchPredicate,
1926 OurPredicate)) {
1927 addPredicateUsers(PI, I);
1928 return createConstantExpression(
1929 ConstantInt::getFalse(CI->getType()));
1931 } else {
1932 // Just handle the ne and eq cases, where if we have the same
1933 // operands, we may know something.
1934 if (BranchPredicate == OurPredicate) {
1935 addPredicateUsers(PI, I);
1936 // Same predicate, same ops, we know it was false, so this is false.
1937 return createConstantExpression(
1938 ConstantInt::getFalse(CI->getType()));
1939 } else if (BranchPredicate ==
1940 CmpInst::getInversePredicate(OurPredicate)) {
1941 addPredicateUsers(PI, I);
1942 // Inverse predicate, we know the other was false, so this is true.
1943 return createConstantExpression(
1944 ConstantInt::getTrue(CI->getType()));
1950 // createExpression will take care of simplifyCmpInst.
1951 return createExpression(I);
1954 // Substitute and symbolize the value before value numbering.
1955 const Expression *
1956 NewGVN::performSymbolicEvaluation(Value *V,
1957 SmallPtrSetImpl<Value *> &Visited) const {
1958 const Expression *E = nullptr;
1959 if (auto *C = dyn_cast<Constant>(V))
1960 E = createConstantExpression(C);
1961 else if (isa<Argument>(V) || isa<GlobalVariable>(V)) {
1962 E = createVariableExpression(V);
1963 } else {
1964 // TODO: memory intrinsics.
1965 // TODO: Some day, we should do the forward propagation and reassociation
1966 // parts of the algorithm.
1967 auto *I = cast<Instruction>(V);
1968 switch (I->getOpcode()) {
1969 case Instruction::ExtractValue:
1970 case Instruction::InsertValue:
1971 E = performSymbolicAggrValueEvaluation(I);
1972 break;
1973 case Instruction::PHI: {
1974 SmallVector<ValPair, 3> Ops;
1975 auto *PN = cast<PHINode>(I);
1976 for (unsigned i = 0; i < PN->getNumOperands(); ++i)
1977 Ops.push_back({PN->getIncomingValue(i), PN->getIncomingBlock(i)});
1978 // Sort to ensure the invariant createPHIExpression requires is met.
1979 sortPHIOps(Ops);
1980 E = performSymbolicPHIEvaluation(Ops, I, getBlockForValue(I));
1981 } break;
1982 case Instruction::Call:
1983 E = performSymbolicCallEvaluation(I);
1984 break;
1985 case Instruction::Store:
1986 E = performSymbolicStoreEvaluation(I);
1987 break;
1988 case Instruction::Load:
1989 E = performSymbolicLoadEvaluation(I);
1990 break;
1991 case Instruction::BitCast:
1992 case Instruction::AddrSpaceCast:
1993 E = createExpression(I);
1994 break;
1995 case Instruction::ICmp:
1996 case Instruction::FCmp:
1997 E = performSymbolicCmpEvaluation(I);
1998 break;
1999 case Instruction::FNeg:
2000 case Instruction::Add:
2001 case Instruction::FAdd:
2002 case Instruction::Sub:
2003 case Instruction::FSub:
2004 case Instruction::Mul:
2005 case Instruction::FMul:
2006 case Instruction::UDiv:
2007 case Instruction::SDiv:
2008 case Instruction::FDiv:
2009 case Instruction::URem:
2010 case Instruction::SRem:
2011 case Instruction::FRem:
2012 case Instruction::Shl:
2013 case Instruction::LShr:
2014 case Instruction::AShr:
2015 case Instruction::And:
2016 case Instruction::Or:
2017 case Instruction::Xor:
2018 case Instruction::Trunc:
2019 case Instruction::ZExt:
2020 case Instruction::SExt:
2021 case Instruction::FPToUI:
2022 case Instruction::FPToSI:
2023 case Instruction::UIToFP:
2024 case Instruction::SIToFP:
2025 case Instruction::FPTrunc:
2026 case Instruction::FPExt:
2027 case Instruction::PtrToInt:
2028 case Instruction::IntToPtr:
2029 case Instruction::Select:
2030 case Instruction::ExtractElement:
2031 case Instruction::InsertElement:
2032 case Instruction::ShuffleVector:
2033 case Instruction::GetElementPtr:
2034 E = createExpression(I);
2035 break;
2036 default:
2037 return nullptr;
2040 return E;
2043 // Look up a container in a map, and then call a function for each thing in the
2044 // found container.
2045 template <typename Map, typename KeyType, typename Func>
2046 void NewGVN::for_each_found(Map &M, const KeyType &Key, Func F) {
2047 const auto Result = M.find_as(Key);
2048 if (Result != M.end())
2049 for (typename Map::mapped_type::value_type Mapped : Result->second)
2050 F(Mapped);
2053 // Look up a container of values/instructions in a map, and touch all the
2054 // instructions in the container. Then erase the entry from the map.
2055 template <typename Map, typename KeyType>
2056 void NewGVN::touchAndErase(Map &M, const KeyType &Key) {
2057 const auto Result = M.find_as(Key);
2058 if (Result != M.end()) {
2059 for (const typename Map::mapped_type::value_type Mapped : Result->second)
2060 TouchedInstructions.set(InstrToDFSNum(Mapped));
2061 M.erase(Result);
2065 void NewGVN::addAdditionalUsers(Value *To, Value *User) const {
2066 assert(User && To != User);
2067 if (isa<Instruction>(To))
2068 AdditionalUsers[To].insert(User);
2071 void NewGVN::markUsersTouched(Value *V) {
2072 // Now mark the users as touched.
2073 for (auto *User : V->users()) {
2074 assert(isa<Instruction>(User) && "Use of value not within an instruction?");
2075 TouchedInstructions.set(InstrToDFSNum(User));
2077 touchAndErase(AdditionalUsers, V);
2080 void NewGVN::addMemoryUsers(const MemoryAccess *To, MemoryAccess *U) const {
2081 LLVM_DEBUG(dbgs() << "Adding memory user " << *U << " to " << *To << "\n");
2082 MemoryToUsers[To].insert(U);
2085 void NewGVN::markMemoryDefTouched(const MemoryAccess *MA) {
2086 TouchedInstructions.set(MemoryToDFSNum(MA));
2089 void NewGVN::markMemoryUsersTouched(const MemoryAccess *MA) {
2090 if (isa<MemoryUse>(MA))
2091 return;
2092 for (auto U : MA->users())
2093 TouchedInstructions.set(MemoryToDFSNum(U));
2094 touchAndErase(MemoryToUsers, MA);
2097 // Add I to the set of users of a given predicate.
2098 void NewGVN::addPredicateUsers(const PredicateBase *PB, Instruction *I) const {
2099 // Don't add temporary instructions to the user lists.
2100 if (AllTempInstructions.count(I))
2101 return;
2103 if (auto *PBranch = dyn_cast<PredicateBranch>(PB))
2104 PredicateToUsers[PBranch->Condition].insert(I);
2105 else if (auto *PAssume = dyn_cast<PredicateAssume>(PB))
2106 PredicateToUsers[PAssume->Condition].insert(I);
2109 // Touch all the predicates that depend on this instruction.
2110 void NewGVN::markPredicateUsersTouched(Instruction *I) {
2111 touchAndErase(PredicateToUsers, I);
2114 // Mark users affected by a memory leader change.
2115 void NewGVN::markMemoryLeaderChangeTouched(CongruenceClass *CC) {
2116 for (auto M : CC->memory())
2117 markMemoryDefTouched(M);
2120 // Touch the instructions that need to be updated after a congruence class has a
2121 // leader change, and mark changed values.
2122 void NewGVN::markValueLeaderChangeTouched(CongruenceClass *CC) {
2123 for (auto M : *CC) {
2124 if (auto *I = dyn_cast<Instruction>(M))
2125 TouchedInstructions.set(InstrToDFSNum(I));
2126 LeaderChanges.insert(M);
2130 // Given a range of things that have instruction DFS numbers, this will return
2131 // the member of the range with the smallest DFS number.
2132 template <class T, class Range>
2133 T *NewGVN::getMinDFSOfRange(const Range &R) const {
2134 std::pair<T *, unsigned> MinDFS = {nullptr, ~0U};
2135 for (const auto X : R) {
2136 auto DFSNum = InstrToDFSNum(X);
2137 if (DFSNum < MinDFS.second)
2138 MinDFS = {X, DFSNum};
2140 return MinDFS.first;
2143 // This function returns the MemoryAccess that should be the next leader of
2144 // congruence class CC, under the assumption that the current leader is going to
2145 // disappear.
2146 const MemoryAccess *NewGVN::getNextMemoryLeader(CongruenceClass *CC) const {
2147 // TODO: If this ends up too slow, we can maintain a next memory leader like we
2148 // do for regular leaders.
2149 // Make sure there will be a leader to find.
2150 assert(!CC->definesNoMemory() && "Can't get next leader if there is none");
2151 if (CC->getStoreCount() > 0) {
2152 if (auto *NL = dyn_cast_or_null<StoreInst>(CC->getNextLeader().first))
2153 return getMemoryAccess(NL);
2154 // Find the store with the minimum DFS number.
2155 auto *V = getMinDFSOfRange<Value>(make_filter_range(
2156 *CC, [&](const Value *V) { return isa<StoreInst>(V); }));
2157 return getMemoryAccess(cast<StoreInst>(V));
2159 assert(CC->getStoreCount() == 0);
2161 // Given our assertion, hitting this part must mean
2162 // !OldClass->memory_empty()
2163 if (CC->memory_size() == 1)
2164 return *CC->memory_begin();
2165 return getMinDFSOfRange<const MemoryPhi>(CC->memory());
2168 // This function returns the next value leader of a congruence class, under the
2169 // assumption that the current leader is going away. This should end up being
2170 // the next most dominating member.
2171 Value *NewGVN::getNextValueLeader(CongruenceClass *CC) const {
2172 // We don't need to sort members if there is only 1, and we don't care about
2173 // sorting the TOP class because everything either gets out of it or is
2174 // unreachable.
2176 if (CC->size() == 1 || CC == TOPClass) {
2177 return *(CC->begin());
2178 } else if (CC->getNextLeader().first) {
2179 ++NumGVNAvoidedSortedLeaderChanges;
2180 return CC->getNextLeader().first;
2181 } else {
2182 ++NumGVNSortedLeaderChanges;
2183 // NOTE: If this ends up too slow, we can maintain a dual structure for
2184 // member testing/insertion, or keep things mostly sorted, and sort only
2185 // here, or use SparseBitVector or ....
2186 return getMinDFSOfRange<Value>(*CC);
2190 // Move a MemoryAccess, currently in OldClass, to NewClass, including updates to
2191 // the memory members, etc., for the move.
2193 // The invariants of this function are:
2195 // - I must be moving to NewClass from OldClass
2196 // - The StoreCount of OldClass and NewClass is expected to have been updated
2197 // for I already if it is a store.
2198 // - The OldClass memory leader has not been updated yet if I was the leader.
2199 void NewGVN::moveMemoryToNewCongruenceClass(Instruction *I,
2200 MemoryAccess *InstMA,
2201 CongruenceClass *OldClass,
2202 CongruenceClass *NewClass) {
2203 // If the leader is I, and we had a representative MemoryAccess, it should
2204 // be the MemoryAccess of OldClass.
2205 assert((!InstMA || !OldClass->getMemoryLeader() ||
2206 OldClass->getLeader() != I ||
2207 MemoryAccessToClass.lookup(OldClass->getMemoryLeader()) ==
2208 MemoryAccessToClass.lookup(InstMA)) &&
2209 "Representative MemoryAccess mismatch");
2210 // First, see what happens to the new class
2211 if (!NewClass->getMemoryLeader()) {
2212 // Should be a new class, or a store becoming a leader of a new class.
2213 assert(NewClass->size() == 1 ||
2214 (isa<StoreInst>(I) && NewClass->getStoreCount() == 1));
2215 NewClass->setMemoryLeader(InstMA);
2216 // Mark it touched if we didn't just create a singleton
2217 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2218 << NewClass->getID()
2219 << " due to new memory instruction becoming leader\n");
2220 markMemoryLeaderChangeTouched(NewClass);
2222 setMemoryClass(InstMA, NewClass);
2223 // Now, fixup the old class if necessary
2224 if (OldClass->getMemoryLeader() == InstMA) {
2225 if (!OldClass->definesNoMemory()) {
2226 OldClass->setMemoryLeader(getNextMemoryLeader(OldClass));
2227 LLVM_DEBUG(dbgs() << "Memory class leader change for class "
2228 << OldClass->getID() << " to "
2229 << *OldClass->getMemoryLeader()
2230 << " due to removal of old leader " << *InstMA << "\n");
2231 markMemoryLeaderChangeTouched(OldClass);
2232 } else
2233 OldClass->setMemoryLeader(nullptr);
2237 // Move a value, currently in OldClass, to be part of NewClass
2238 // Update OldClass and NewClass for the move (including changing leaders, etc).
2239 void NewGVN::moveValueToNewCongruenceClass(Instruction *I, const Expression *E,
2240 CongruenceClass *OldClass,
2241 CongruenceClass *NewClass) {
2242 if (I == OldClass->getNextLeader().first)
2243 OldClass->resetNextLeader();
2245 OldClass->erase(I);
2246 NewClass->insert(I);
2248 if (NewClass->getLeader() != I)
2249 NewClass->addPossibleNextLeader({I, InstrToDFSNum(I)});
2250 // Handle our special casing of stores.
2251 if (auto *SI = dyn_cast<StoreInst>(I)) {
2252 OldClass->decStoreCount();
2253 // Okay, so when do we want to make a store a leader of a class?
2254 // If we have a store defined by an earlier load, we want the earlier load
2255 // to lead the class.
2256 // If we have a store defined by something else, we want the store to lead
2257 // the class so everything else gets the "something else" as a value.
2258 // If we have a store as the single member of the class, we want the store
2259 // as the leader
2260 if (NewClass->getStoreCount() == 0 && !NewClass->getStoredValue()) {
2261 // If it's a store expression we are using, it means we are not equivalent
2262 // to something earlier.
2263 if (auto *SE = dyn_cast<StoreExpression>(E)) {
2264 NewClass->setStoredValue(SE->getStoredValue());
2265 markValueLeaderChangeTouched(NewClass);
2266 // Shift the new class leader to be the store
2267 LLVM_DEBUG(dbgs() << "Changing leader of congruence class "
2268 << NewClass->getID() << " from "
2269 << *NewClass->getLeader() << " to " << *SI
2270 << " because store joined class\n");
2271 // If we changed the leader, we have to mark it changed because we don't
2272 // know what it will do to symbolic evaluation.
2273 NewClass->setLeader(SI);
2275 // We rely on the code below handling the MemoryAccess change.
2277 NewClass->incStoreCount();
2279 // True if there are no memory instructions left in a class that had memory
2280 // instructions before.
2282 // If it's not a memory use, set the MemoryAccess equivalence
2283 auto *InstMA = dyn_cast_or_null<MemoryDef>(getMemoryAccess(I));
2284 if (InstMA)
2285 moveMemoryToNewCongruenceClass(I, InstMA, OldClass, NewClass);
2286 ValueToClass[I] = NewClass;
2287 // See if we destroyed the class or need to swap leaders.
2288 if (OldClass->empty() && OldClass != TOPClass) {
2289 if (OldClass->getDefiningExpr()) {
2290 LLVM_DEBUG(dbgs() << "Erasing expression " << *OldClass->getDefiningExpr()
2291 << " from table\n");
2292 // We erase it as an exact expression to make sure we don't just erase an
2293 // equivalent one.
2294 auto Iter = ExpressionToClass.find_as(
2295 ExactEqualsExpression(*OldClass->getDefiningExpr()));
2296 if (Iter != ExpressionToClass.end())
2297 ExpressionToClass.erase(Iter);
2298 #ifdef EXPENSIVE_CHECKS
2299 assert(
2300 (*OldClass->getDefiningExpr() != *E || ExpressionToClass.lookup(E)) &&
2301 "We erased the expression we just inserted, which should not happen");
2302 #endif
2304 } else if (OldClass->getLeader() == I) {
2305 // When the leader changes, the value numbering of
2306 // everything may change due to symbolization changes, so we need to
2307 // reprocess.
2308 LLVM_DEBUG(dbgs() << "Value class leader change for class "
2309 << OldClass->getID() << "\n");
2310 ++NumGVNLeaderChanges;
2311 // Destroy the stored value if there are no more stores to represent it.
2312 // Note that this is basically clean up for the expression removal that
2313 // happens below. If we remove stores from a class, we may leave it as a
2314 // class of equivalent memory phis.
2315 if (OldClass->getStoreCount() == 0) {
2316 if (OldClass->getStoredValue())
2317 OldClass->setStoredValue(nullptr);
2319 OldClass->setLeader(getNextValueLeader(OldClass));
2320 OldClass->resetNextLeader();
2321 markValueLeaderChangeTouched(OldClass);
2325 // For a given expression, mark the phi of ops instructions that could have
2326 // changed as a result.
2327 void NewGVN::markPhiOfOpsChanged(const Expression *E) {
2328 touchAndErase(ExpressionToPhiOfOps, E);
2331 // Perform congruence finding on a given value numbering expression.
2332 void NewGVN::performCongruenceFinding(Instruction *I, const Expression *E) {
2333 // This is guaranteed to return something, since it will at least find
2334 // TOP.
2336 CongruenceClass *IClass = ValueToClass.lookup(I);
2337 assert(IClass && "Should have found a IClass");
2338 // Dead classes should have been eliminated from the mapping.
2339 assert(!IClass->isDead() && "Found a dead class");
2341 CongruenceClass *EClass = nullptr;
2342 if (const auto *VE = dyn_cast<VariableExpression>(E)) {
2343 EClass = ValueToClass.lookup(VE->getVariableValue());
2344 } else if (isa<DeadExpression>(E)) {
2345 EClass = TOPClass;
2347 if (!EClass) {
2348 auto lookupResult = ExpressionToClass.insert({E, nullptr});
2350 // If it's not in the value table, create a new congruence class.
2351 if (lookupResult.second) {
2352 CongruenceClass *NewClass = createCongruenceClass(nullptr, E);
2353 auto place = lookupResult.first;
2354 place->second = NewClass;
2356 // Constants and variables should always be made the leader.
2357 if (const auto *CE = dyn_cast<ConstantExpression>(E)) {
2358 NewClass->setLeader(CE->getConstantValue());
2359 } else if (const auto *SE = dyn_cast<StoreExpression>(E)) {
2360 StoreInst *SI = SE->getStoreInst();
2361 NewClass->setLeader(SI);
2362 NewClass->setStoredValue(SE->getStoredValue());
2363 // The RepMemoryAccess field will be filled in properly by the
2364 // moveValueToNewCongruenceClass call.
2365 } else {
2366 NewClass->setLeader(I);
2368 assert(!isa<VariableExpression>(E) &&
2369 "VariableExpression should have been handled already");
2371 EClass = NewClass;
2372 LLVM_DEBUG(dbgs() << "Created new congruence class for " << *I
2373 << " using expression " << *E << " at "
2374 << NewClass->getID() << " and leader "
2375 << *(NewClass->getLeader()));
2376 if (NewClass->getStoredValue())
2377 LLVM_DEBUG(dbgs() << " and stored value "
2378 << *(NewClass->getStoredValue()));
2379 LLVM_DEBUG(dbgs() << "\n");
2380 } else {
2381 EClass = lookupResult.first->second;
2382 if (isa<ConstantExpression>(E))
2383 assert((isa<Constant>(EClass->getLeader()) ||
2384 (EClass->getStoredValue() &&
2385 isa<Constant>(EClass->getStoredValue()))) &&
2386 "Any class with a constant expression should have a "
2387 "constant leader");
2389 assert(EClass && "Somehow don't have an eclass");
2391 assert(!EClass->isDead() && "We accidentally looked up a dead class");
2394 bool ClassChanged = IClass != EClass;
2395 bool LeaderChanged = LeaderChanges.erase(I);
2396 if (ClassChanged || LeaderChanged) {
2397 LLVM_DEBUG(dbgs() << "New class " << EClass->getID() << " for expression "
2398 << *E << "\n");
2399 if (ClassChanged) {
2400 moveValueToNewCongruenceClass(I, E, IClass, EClass);
2401 markPhiOfOpsChanged(E);
2404 markUsersTouched(I);
2405 if (MemoryAccess *MA = getMemoryAccess(I))
2406 markMemoryUsersTouched(MA);
2407 if (auto *CI = dyn_cast<CmpInst>(I))
2408 markPredicateUsersTouched(CI);
2410 // If we changed the class of the store, we want to ensure nothing finds the
2411 // old store expression. In particular, loads do not compare against the stored
2412 // value, so they will find old store expressions (and associated class
2413 // mappings) if we leave them in the table.
2414 if (ClassChanged && isa<StoreInst>(I)) {
2415 auto *OldE = ValueToExpression.lookup(I);
2416 // It could just be that the old class died. We don't want to erase it if we
2417 // just moved classes.
2418 if (OldE && isa<StoreExpression>(OldE) && *E != *OldE) {
2419 // Erase this as an exact expression to ensure we don't erase expressions
2420 // equivalent to it.
2421 auto Iter = ExpressionToClass.find_as(ExactEqualsExpression(*OldE));
2422 if (Iter != ExpressionToClass.end())
2423 ExpressionToClass.erase(Iter);
2426 ValueToExpression[I] = E;
2429 // Process the fact that Edge (from, to) is reachable, including marking
2430 // any newly reachable blocks and instructions for processing.
2431 void NewGVN::updateReachableEdge(BasicBlock *From, BasicBlock *To) {
2432 // Check if the Edge was reachable before.
2433 if (ReachableEdges.insert({From, To}).second) {
2434 // If this block wasn't reachable before, all instructions are touched.
2435 if (ReachableBlocks.insert(To).second) {
2436 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2437 << " marked reachable\n");
2438 const auto &InstRange = BlockInstRange.lookup(To);
2439 TouchedInstructions.set(InstRange.first, InstRange.second);
2440 } else {
2441 LLVM_DEBUG(dbgs() << "Block " << getBlockName(To)
2442 << " was reachable, but new edge {"
2443 << getBlockName(From) << "," << getBlockName(To)
2444 << "} to it found\n");
2446 // We've made an edge reachable to an existing block, which may
2447 // impact predicates. Otherwise, only mark the phi nodes as touched, as
2448 // they are the only things that depend on new edges. Anything using their
2449 // values will get propagated to if necessary.
2450 if (MemoryAccess *MemPhi = getMemoryAccess(To))
2451 TouchedInstructions.set(InstrToDFSNum(MemPhi));
2453 // FIXME: We should just add a union op on a Bitvector and
2454 // SparseBitVector. We can do it word by word faster than we are doing it
2455 // here.
2456 for (auto InstNum : RevisitOnReachabilityChange[To])
2457 TouchedInstructions.set(InstNum);
2462 // Given a predicate condition (from a switch, cmp, or whatever), see if we
2463 // know some constant value for it already.
2464 Value *NewGVN::findConditionEquivalence(Value *Cond) const {
2465 auto Result = lookupOperandLeader(Cond);
2466 return isa<Constant>(Result) ? Result : nullptr;
2469 // Process the outgoing edges of a block for reachability.
2470 void NewGVN::processOutgoingEdges(Instruction *TI, BasicBlock *B) {
2471 // Evaluate reachability of terminator instruction.
2472 Value *Cond;
2473 BasicBlock *TrueSucc, *FalseSucc;
2474 if (match(TI, m_Br(m_Value(Cond), TrueSucc, FalseSucc))) {
2475 Value *CondEvaluated = findConditionEquivalence(Cond);
2476 if (!CondEvaluated) {
2477 if (auto *I = dyn_cast<Instruction>(Cond)) {
2478 const Expression *E = createExpression(I);
2479 if (const auto *CE = dyn_cast<ConstantExpression>(E)) {
2480 CondEvaluated = CE->getConstantValue();
2482 } else if (isa<ConstantInt>(Cond)) {
2483 CondEvaluated = Cond;
2486 ConstantInt *CI;
2487 if (CondEvaluated && (CI = dyn_cast<ConstantInt>(CondEvaluated))) {
2488 if (CI->isOne()) {
2489 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2490 << " evaluated to true\n");
2491 updateReachableEdge(B, TrueSucc);
2492 } else if (CI->isZero()) {
2493 LLVM_DEBUG(dbgs() << "Condition for Terminator " << *TI
2494 << " evaluated to false\n");
2495 updateReachableEdge(B, FalseSucc);
2497 } else {
2498 updateReachableEdge(B, TrueSucc);
2499 updateReachableEdge(B, FalseSucc);
2501 } else if (auto *SI = dyn_cast<SwitchInst>(TI)) {
2502 // For switches, propagate the case values into the case
2503 // destinations.
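// For example (hypothetical): if the switch condition value numbers to the
// constant 3, only the edge to the successor for "case 3" (or the default
// destination when no case matches) is marked reachable.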
2505 Value *SwitchCond = SI->getCondition();
2506 Value *CondEvaluated = findConditionEquivalence(SwitchCond);
2507 // See if we were able to turn this switch statement into a constant.
2508 if (CondEvaluated && isa<ConstantInt>(CondEvaluated)) {
2509 auto *CondVal = cast<ConstantInt>(CondEvaluated);
2510 // We should be able to get the case value for this.
2511 auto Case = *SI->findCaseValue(CondVal);
2512 if (Case.getCaseSuccessor() == SI->getDefaultDest()) {
2513 // We proved the value is outside of the range of the case.
2514 // We can't do anything other than mark the default dest as reachable,
2515 // and go home.
2516 updateReachableEdge(B, SI->getDefaultDest());
2517 return;
2519 // Now get where it goes and mark it reachable.
2520 BasicBlock *TargetBlock = Case.getCaseSuccessor();
2521 updateReachableEdge(B, TargetBlock);
2522 } else {
2523 for (unsigned i = 0, e = SI->getNumSuccessors(); i != e; ++i) {
2524 BasicBlock *TargetBlock = SI->getSuccessor(i);
2525 updateReachableEdge(B, TargetBlock);
2528 } else {
2529 // Otherwise this is either unconditional, or a type we have no
2530 // idea about. Just mark successors as reachable.
2531 for (unsigned i = 0, e = TI->getNumSuccessors(); i != e; ++i) {
2532 BasicBlock *TargetBlock = TI->getSuccessor(i);
2533 updateReachableEdge(B, TargetBlock);
2536 // This also may be a memory defining terminator, in which case, set it
2537 // equivalent only to itself.
2539 auto *MA = getMemoryAccess(TI);
2540 if (MA && !isa<MemoryUse>(MA)) {
2541 auto *CC = ensureLeaderOfMemoryClass(MA);
2542 if (setMemoryClass(MA, CC))
2543 markMemoryUsersTouched(MA);
2548 // Remove the PHI of Ops PHI for I
2549 void NewGVN::removePhiOfOps(Instruction *I, PHINode *PHITemp) {
2550 InstrDFS.erase(PHITemp);
2551 // It's still a temp instruction. We keep it in the array so it gets erased.
2552 // However, it's no longer used by I, or in the block.
2553 TempToBlock.erase(PHITemp);
2554 RealToTemp.erase(I);
2555 // We don't remove the users from the phi node uses. This wastes a little
2556 // time, but such is life. We could use two sets to track which were there
2557 // at the start of NewGVN, and which were added, but right now the cost of
2558 // tracking is more than the cost of checking for more phi of ops.
2561 // Add PHI Op in BB as a PHI of operations version of ExistingValue.
2562 void NewGVN::addPhiOfOps(PHINode *Op, BasicBlock *BB,
2563 Instruction *ExistingValue) {
2564 InstrDFS[Op] = InstrToDFSNum(ExistingValue);
2565 AllTempInstructions.insert(Op);
2566 TempToBlock[Op] = BB;
2567 RealToTemp[ExistingValue] = Op;
2568 // Add all users to the phi node uses, as they are now uses of the phi of ops phis
2569 // and may themselves be phi of ops.
2570 for (auto *U : ExistingValue->users())
2571 if (auto *UI = dyn_cast<Instruction>(U))
2572 PHINodeUses.insert(UI);
2575 static bool okayForPHIOfOps(const Instruction *I) {
2576 if (!EnablePhiOfOps)
2577 return false;
2578 return isa<BinaryOperator>(I) || isa<SelectInst>(I) || isa<CmpInst>(I) ||
2579 isa<LoadInst>(I);
2582 bool NewGVN::OpIsSafeForPHIOfOpsHelper(
2583 Value *V, const BasicBlock *PHIBlock,
2584 SmallPtrSetImpl<const Value *> &Visited,
2585 SmallVectorImpl<Instruction *> &Worklist) {
2587 if (!isa<Instruction>(V))
2588 return true;
2589 auto OISIt = OpSafeForPHIOfOps.find(V);
2590 if (OISIt != OpSafeForPHIOfOps.end())
2591 return OISIt->second;
2593 // Keep walking until we either dominate the phi block, or hit a phi, or run
2594 // out of things to check.
2595 if (DT->properlyDominates(getBlockForValue(V), PHIBlock)) {
2596 OpSafeForPHIOfOps.insert({V, true});
2597 return true;
2599 // PHI in the same block.
2600 if (isa<PHINode>(V) && getBlockForValue(V) == PHIBlock) {
2601 OpSafeForPHIOfOps.insert({V, false});
2602 return false;
2605 auto *OrigI = cast<Instruction>(V);
2606 for (auto *Op : OrigI->operand_values()) {
2607 if (!isa<Instruction>(Op))
2608 continue;
2609 // Stop now if we find an unsafe operand.
2610 auto OISIt = OpSafeForPHIOfOps.find(OrigI);
2611 if (OISIt != OpSafeForPHIOfOps.end()) {
2612 if (!OISIt->second) {
2613 OpSafeForPHIOfOps.insert({V, false});
2614 return false;
2616 continue;
2618 if (!Visited.insert(Op).second)
2619 continue;
2620 Worklist.push_back(cast<Instruction>(Op));
2622 return true;
2625 // Return true if this operand will be safe to use for phi of ops.
2627 // The reason some operands are unsafe is that we are not trying to recursively
2628 // translate everything back through phi nodes. We actually expect some lookups
2629 // of expressions to fail, in particular a lookup where the expression cannot
2630 // exist in the predecessor. This is true even if the expression, as shown, can
2631 // be determined to be constant.
2632 bool NewGVN::OpIsSafeForPHIOfOps(Value *V, const BasicBlock *PHIBlock,
2633 SmallPtrSetImpl<const Value *> &Visited) {
2634 SmallVector<Instruction *, 4> Worklist;
2635 if (!OpIsSafeForPHIOfOpsHelper(V, PHIBlock, Visited, Worklist))
2636 return false;
2637 while (!Worklist.empty()) {
2638 auto *I = Worklist.pop_back_val();
2639 if (!OpIsSafeForPHIOfOpsHelper(I, PHIBlock, Visited, Worklist))
2640 return false;
2642 OpSafeForPHIOfOps.insert({V, true});
2643 return true;
2646 // Try to find a leader for instruction TransInst, which is a phi translated
2647 // version of something in our original program. Visited is used to ensure we
2648 // don't infinite loop during translations of cycles. OrigInst is the
2649 // instruction in the original program, and PredBB is the predecessor we
2650 // translated it through.
2651 Value *NewGVN::findLeaderForInst(Instruction *TransInst,
2652 SmallPtrSetImpl<Value *> &Visited,
2653 MemoryAccess *MemAccess, Instruction *OrigInst,
2654 BasicBlock *PredBB) {
2655 unsigned IDFSNum = InstrToDFSNum(OrigInst);
2656 // Make sure it's marked as a temporary instruction.
2657 AllTempInstructions.insert(TransInst);
2658 // and make sure anything that tries to add its DFS number is
2659 // redirected to the instruction we are making a phi of ops
2660 // for.
2661 TempToBlock.insert({TransInst, PredBB});
2662 InstrDFS.insert({TransInst, IDFSNum});
2664 const Expression *E = performSymbolicEvaluation(TransInst, Visited);
2665 InstrDFS.erase(TransInst);
2666 AllTempInstructions.erase(TransInst);
2667 TempToBlock.erase(TransInst);
2668 if (MemAccess)
2669 TempToMemory.erase(TransInst);
2670 if (!E)
2671 return nullptr;
2672 auto *FoundVal = findPHIOfOpsLeader(E, OrigInst, PredBB);
2673 if (!FoundVal) {
2674 ExpressionToPhiOfOps[E].insert(OrigInst);
2675 LLVM_DEBUG(dbgs() << "Cannot find phi of ops operand for " << *TransInst
2676 << " in block " << getBlockName(PredBB) << "\n");
2677 return nullptr;
2679 if (auto *SI = dyn_cast<StoreInst>(FoundVal))
2680 FoundVal = SI->getValueOperand();
2681 return FoundVal;
2684 // When we see an instruction that is an op of phis, generate the equivalent phi
2685 // of ops form.
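// For example (hypothetical IR), given
//   %p = phi i32 [ %a, %bb1 ], [ %b, %bb2 ]
//   %r = add i32 %p, 1
// we look for existing leaders of "%a + 1" in %bb1 and "%b + 1" in %bb2; if
// both are found, we build "phi i32 [ <leader>, %bb1 ], [ <leader>, %bb2 ]"
// so %r can value number with that phi.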
2686 const Expression *
2687 NewGVN::makePossiblePHIOfOps(Instruction *I,
2688 SmallPtrSetImpl<Value *> &Visited) {
2689 if (!okayForPHIOfOps(I))
2690 return nullptr;
2692 if (!Visited.insert(I).second)
2693 return nullptr;
2694 // For now, we require the instruction be cycle free because we don't
2695 // *always* create a phi of ops for instructions that could be done as phi
2696 // of ops, we only do it if we think it is useful. If we did do it all the
2697 // time, we could remove the cycle free check.
2698 if (!isCycleFree(I))
2699 return nullptr;
2701 SmallPtrSet<const Value *, 8> ProcessedPHIs;
2702 // TODO: We don't do phi translation on memory accesses because it's
2703 // complicated. For a load, we'd need to be able to simulate a new memoryuse,
2704 // which we don't have a good way of doing ATM.
2705 auto *MemAccess = getMemoryAccess(I);
2706 // If the memory operation is defined by a memory operation in this block that
2707 // isn't a MemoryPhi, transforming the pointer backwards through a scalar phi
2708 // can't help, as it would still be killed by that memory operation.
2709 if (MemAccess && !isa<MemoryPhi>(MemAccess->getDefiningAccess()) &&
2710 MemAccess->getDefiningAccess()->getBlock() == I->getParent())
2711 return nullptr;
2713 // Convert op of phis to phi of ops
2714 SmallPtrSet<const Value *, 10> VisitedOps;
2715 SmallVector<Value *, 4> Ops(I->operand_values());
2716 BasicBlock *SamePHIBlock = nullptr;
2717 PHINode *OpPHI = nullptr;
2718 if (!DebugCounter::shouldExecute(PHIOfOpsCounter))
2719 return nullptr;
2720 for (auto *Op : Ops) {
2721 if (!isa<PHINode>(Op)) {
2722 auto *ValuePHI = RealToTemp.lookup(Op);
2723 if (!ValuePHI)
2724 continue;
2725 LLVM_DEBUG(dbgs() << "Found possible dependent phi of ops\n");
2726 Op = ValuePHI;
2728 OpPHI = cast<PHINode>(Op);
2729 if (!SamePHIBlock) {
2730 SamePHIBlock = getBlockForValue(OpPHI);
2731 } else if (SamePHIBlock != getBlockForValue(OpPHI)) {
2732 LLVM_DEBUG(
2733 dbgs()
2734 << "PHIs for operands are not all in the same block, aborting\n");
2735 return nullptr;
2737 // No point in doing this for one-operand phis.
2738 if (OpPHI->getNumOperands() == 1) {
2739 OpPHI = nullptr;
2740 continue;
2744 if (!OpPHI)
2745 return nullptr;
2747 SmallVector<ValPair, 4> PHIOps;
2748 SmallPtrSet<Value *, 4> Deps;
2749 auto *PHIBlock = getBlockForValue(OpPHI);
2750 RevisitOnReachabilityChange[PHIBlock].reset(InstrToDFSNum(I));
2751 for (unsigned PredNum = 0; PredNum < OpPHI->getNumOperands(); ++PredNum) {
2752 auto *PredBB = OpPHI->getIncomingBlock(PredNum);
2753 Value *FoundVal = nullptr;
2754 SmallPtrSet<Value *, 4> CurrentDeps;
2755 // We could just skip unreachable edges entirely but it's tricky to do
2756 // with rewriting existing phi nodes.
2757 if (ReachableEdges.count({PredBB, PHIBlock})) {
2758 // Clone the instruction, create an expression from it that is
2759 // translated back into the predecessor, and see if we have a leader.
2760 Instruction *ValueOp = I->clone();
2761 if (MemAccess)
2762 TempToMemory.insert({ValueOp, MemAccess});
2763 bool SafeForPHIOfOps = true;
2764 VisitedOps.clear();
2765 for (auto &Op : ValueOp->operands()) {
2766 auto *OrigOp = &*Op;
2767 // When these operands change, it could change whether there is a
2768 // leader for us or not, so we have to add additional users.
2769 if (isa<PHINode>(Op)) {
2770 Op = Op->DoPHITranslation(PHIBlock, PredBB);
2771 if (Op != OrigOp && Op != I)
2772 CurrentDeps.insert(Op);
2773 } else if (auto *ValuePHI = RealToTemp.lookup(Op)) {
2774 if (getBlockForValue(ValuePHI) == PHIBlock)
2775 Op = ValuePHI->getIncomingValueForBlock(PredBB);
2777 // If we phi-translated the op, it must be safe.
2778 SafeForPHIOfOps =
2779 SafeForPHIOfOps &&
2780 (Op != OrigOp || OpIsSafeForPHIOfOps(Op, PHIBlock, VisitedOps));
2782 // FIXME: For those things that are not safe we could generate
2783 // expressions all the way down, and see if this comes out to a
2784 // constant. For anything where that is true, and unsafe, we should
2785 // have made a phi-of-ops (or value numbered it equivalent to something)
2786 // for the pieces already.
2787 FoundVal = !SafeForPHIOfOps ? nullptr
2788 : findLeaderForInst(ValueOp, Visited,
2789 MemAccess, I, PredBB);
2790 ValueOp->deleteValue();
2791 if (!FoundVal) {
2792 // We failed to find a leader for the current ValueOp, but this might
2793 // change if the translated operands change.
2794 if (SafeForPHIOfOps)
2795 for (auto Dep : CurrentDeps)
2796 addAdditionalUsers(Dep, I);
2798 return nullptr;
2800 Deps.insert(CurrentDeps.begin(), CurrentDeps.end());
2801 } else {
2802 LLVM_DEBUG(dbgs() << "Skipping phi of ops operand for incoming block "
2803 << getBlockName(PredBB)
2804 << " because the block is unreachable\n");
2805 FoundVal = UndefValue::get(I->getType());
2806 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2809 PHIOps.push_back({FoundVal, PredBB});
2810 LLVM_DEBUG(dbgs() << "Found phi of ops operand " << *FoundVal << " in "
2811 << getBlockName(PredBB) << "\n");
2813 for (auto Dep : Deps)
2814 addAdditionalUsers(Dep, I);
2815 sortPHIOps(PHIOps);
2816 auto *E = performSymbolicPHIEvaluation(PHIOps, I, PHIBlock);
2817 if (isa<ConstantExpression>(E) || isa<VariableExpression>(E)) {
2818 LLVM_DEBUG(
2819 dbgs()
2820 << "Not creating real PHI of ops because it simplified to existing "
2821 "value or constant\n");
2822 return E;
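// Reuse the temporary phi-of-ops previously created for I, if any; otherwise
// build a fresh temporary PHINode. It is only spliced into the actual IR
// later, during elimination, if it turns out to be needed.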
2824 auto *ValuePHI = RealToTemp.lookup(I);
2825 bool NewPHI = false;
2826 if (!ValuePHI) {
2827 ValuePHI =
2828 PHINode::Create(I->getType(), OpPHI->getNumOperands(), "phiofops");
2829 addPhiOfOps(ValuePHI, PHIBlock, I);
2830 NewPHI = true;
2831 NumGVNPHIOfOpsCreated++;
2833 if (NewPHI) {
2834 for (auto PHIOp : PHIOps)
2835 ValuePHI->addIncoming(PHIOp.first, PHIOp.second);
2836 } else {
2837 TempToBlock[ValuePHI] = PHIBlock;
2838 unsigned int i = 0;
2839 for (auto PHIOp : PHIOps) {
2840 ValuePHI->setIncomingValue(i, PHIOp.first);
2841 ValuePHI->setIncomingBlock(i, PHIOp.second);
2842 ++i;
2845 RevisitOnReachabilityChange[PHIBlock].set(InstrToDFSNum(I));
2846 LLVM_DEBUG(dbgs() << "Created phi of ops " << *ValuePHI << " for " << *I
2847 << "\n");
2849 return E;
2852 // The algorithm initially places the values of the routine in the TOP
2853 // congruence class. The leader of TOP is the undetermined value `undef`.
2854 // When the algorithm has finished, values still in TOP are unreachable.
2855 void NewGVN::initializeCongruenceClasses(Function &F) {
2856 NextCongruenceNum = 0;
2858 // Note that even though we use the live on entry def as a representative
2859 // MemoryAccess, it is *not* the same as the actual live on entry def. We
2860 // have no real equivalent to undef for MemoryAccesses, and so we really
2861 // should be checking whether the MemoryAccess is top if we want to know if it
2862 // is equivalent to everything. Otherwise, what this really signifies is that
2863 // the access reaches all the way back to the beginning of the function.
2865 // Initialize all other instructions to be in TOP class.
2866 TOPClass = createCongruenceClass(nullptr, nullptr);
2867 TOPClass->setMemoryLeader(MSSA->getLiveOnEntryDef());
2868 // The live on entry def gets put into its own class.
2869 MemoryAccessToClass[MSSA->getLiveOnEntryDef()] =
2870 createMemoryClass(MSSA->getLiveOnEntryDef());
2872 for (auto DTN : nodes(DT)) {
2873 BasicBlock *BB = DTN->getBlock();
2874 // All MemoryAccesses are equivalent to live on entry to start. They must
2875 // be initialized to something so that initial changes are noticed. For
2876 // the maximal answer, we initialize them all to be the same as
2877 // liveOnEntry.
2878 auto *MemoryBlockDefs = MSSA->getBlockDefs(BB);
2879 if (MemoryBlockDefs)
2880 for (const auto &Def : *MemoryBlockDefs) {
2881 MemoryAccessToClass[&Def] = TOPClass;
2882 auto *MD = dyn_cast<MemoryDef>(&Def);
2883 // Insert the memory phis into the member list.
2884 if (!MD) {
2885 const MemoryPhi *MP = cast<MemoryPhi>(&Def);
2886 TOPClass->memory_insert(MP);
2887 MemoryPhiState.insert({MP, MPS_TOP});
2890 if (MD && isa<StoreInst>(MD->getMemoryInst()))
2891 TOPClass->incStoreCount();
2894 // FIXME: This is trying to discover which instructions are uses of phi
2895 // nodes. We should move this into one of the myriad of places that walk
2896 // all the operands already.
2897 for (auto &I : *BB) {
2898 if (isa<PHINode>(&I))
2899 for (auto *U : I.users())
2900 if (auto *UInst = dyn_cast<Instruction>(U))
2901 if (InstrToDFSNum(UInst) != 0 && okayForPHIOfOps(UInst))
2902 PHINodeUses.insert(UInst);
2903 // Don't insert void terminators into the class. We don't value number
2904 // them, and they just end up sitting in TOP.
2905 if (I.isTerminator() && I.getType()->isVoidTy())
2906 continue;
2907 TOPClass->insert(&I);
2908 ValueToClass[&I] = TOPClass;
2912 // Initialize arguments to be in their own unique congruence classes
2913 for (auto &FA : F.args())
2914 createSingletonCongruenceClass(&FA);
2917 void NewGVN::cleanupTables() {
2918 for (unsigned i = 0, e = CongruenceClasses.size(); i != e; ++i) {
2919 LLVM_DEBUG(dbgs() << "Congruence class " << CongruenceClasses[i]->getID()
2920 << " has " << CongruenceClasses[i]->size()
2921 << " members\n");
2922 // Make sure we delete the congruence class (probably worth switching to
2923 // a unique_ptr at some point).
2924 delete CongruenceClasses[i];
2925 CongruenceClasses[i] = nullptr;
2928 // Destroy the value expressions
2929 SmallVector<Instruction *, 8> TempInst(AllTempInstructions.begin(),
2930 AllTempInstructions.end());
2931 AllTempInstructions.clear();
2933 // We have to drop all references for everything first, so there are no uses
2934 // left as we delete them.
2935 for (auto *I : TempInst) {
2936 I->dropAllReferences();
2939 while (!TempInst.empty()) {
2940 auto *I = TempInst.back();
2941 TempInst.pop_back();
2942 I->deleteValue();
2945 ValueToClass.clear();
2946 ArgRecycler.clear(ExpressionAllocator);
2947 ExpressionAllocator.Reset();
2948 CongruenceClasses.clear();
2949 ExpressionToClass.clear();
2950 ValueToExpression.clear();
2951 RealToTemp.clear();
2952 AdditionalUsers.clear();
2953 ExpressionToPhiOfOps.clear();
2954 TempToBlock.clear();
2955 TempToMemory.clear();
2956 PHINodeUses.clear();
2957 OpSafeForPHIOfOps.clear();
2958 ReachableBlocks.clear();
2959 ReachableEdges.clear();
2960 #ifndef NDEBUG
2961 ProcessedCount.clear();
2962 #endif
2963 InstrDFS.clear();
2964 InstructionsToErase.clear();
2965 DFSToInstr.clear();
2966 BlockInstRange.clear();
2967 TouchedInstructions.clear();
2968 MemoryAccessToClass.clear();
2969 PredicateToUsers.clear();
2970 MemoryToUsers.clear();
2971 RevisitOnReachabilityChange.clear();
2974 // Assign local DFS number mapping to instructions, and leave space for Value
2975 // PHI's.
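// DFS number 0 is reserved as a sentinel meaning "dead or not value numbered";
// trivially dead instructions below get 0 and are skipped by the main
// iteration loop.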
2976 std::pair<unsigned, unsigned> NewGVN::assignDFSNumbers(BasicBlock *B,
2977 unsigned Start) {
2978 unsigned End = Start;
2979 if (MemoryAccess *MemPhi = getMemoryAccess(B)) {
2980 InstrDFS[MemPhi] = End++;
2981 DFSToInstr.emplace_back(MemPhi);
2984 // Then the real block goes next.
2985 for (auto &I : *B) {
2986 // There's no need to call isInstructionTriviallyDead more than once on
2987 // an instruction. Therefore, once we know that an instruction is dead
2988 // we change its DFS number so that it doesn't get value numbered.
2989 if (isInstructionTriviallyDead(&I, TLI)) {
2990 InstrDFS[&I] = 0;
2991 LLVM_DEBUG(dbgs() << "Skipping trivially dead instruction " << I << "\n");
2992 markInstructionForDeletion(&I);
2993 continue;
2995 if (isa<PHINode>(&I))
2996 RevisitOnReachabilityChange[B].set(End);
2997 InstrDFS[&I] = End++;
2998 DFSToInstr.emplace_back(&I);
3001 // All of the range functions take half-open ranges (open on the end side).
3002 // So we do not subtract one from count, because at this point it is one
3003 // greater than the last instruction.
3004 return std::make_pair(Start, End);
3007 void NewGVN::updateProcessedCount(const Value *V) {
3008 #ifndef NDEBUG
3009 if (ProcessedCount.count(V) == 0) {
3010 ProcessedCount.insert({V, 1});
3011 } else {
3012 ++ProcessedCount[V];
3013 assert(ProcessedCount[V] < 100 &&
3014 "Seem to have processed the same Value a lot");
3016 #endif
3019 // Evaluate MemoryPhi nodes symbolically, just like PHI nodes
3020 void NewGVN::valueNumberMemoryPhi(MemoryPhi *MP) {
3021 // If all the arguments are the same, the MemoryPhi has the same value as the
3022 // argument. Filter out unreachable blocks and self phis from our operands.
3023 // TODO: We could do cycle-checking on the memory phis to allow valueizing for
3024 // self-phi checking.
3025 const BasicBlock *PHIBlock = MP->getBlock();
3026 auto Filtered = make_filter_range(MP->operands(), [&](const Use &U) {
3027 return cast<MemoryAccess>(U) != MP &&
3028 !isMemoryAccessTOP(cast<MemoryAccess>(U)) &&
3029 ReachableEdges.count({MP->getIncomingBlock(U), PHIBlock});
3031 // If all that is left is nothing, our MemoryPhi is undef. We keep it in
3032 // TOPClass. Note: the only case where this should happen is if we have at
3033 // least one self-argument.
3034 if (Filtered.begin() == Filtered.end()) {
3035 if (setMemoryClass(MP, TOPClass))
3036 markMemoryUsersTouched(MP);
3037 return;
3040 // Transform the remaining operands into operand leaders.
3041 // FIXME: mapped_iterator should have a range version.
3042 auto LookupFunc = [&](const Use &U) {
3043 return lookupMemoryLeader(cast<MemoryAccess>(U));
3045 auto MappedBegin = map_iterator(Filtered.begin(), LookupFunc);
3046 auto MappedEnd = map_iterator(Filtered.end(), LookupFunc);
3048 // and now check if all the elements are equal.
3049 // Sadly, we can't use std::equal since these are random access iterators.
3050 const auto *AllSameValue = *MappedBegin;
3051 ++MappedBegin;
3052 bool AllEqual = std::all_of(
3053 MappedBegin, MappedEnd,
3054 [&AllSameValue](const MemoryAccess *V) { return V == AllSameValue; });
3056 if (AllEqual)
3057 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to " << *AllSameValue
3058 << "\n");
3059 else
3060 LLVM_DEBUG(dbgs() << "Memory Phi value numbered to itself\n");
3061 // If it's equal to something, it's in that class. Otherwise, it has to be in
3062 // a class where it is the leader (other things may be equivalent to it, but
3063 // it needs to start off in its own class, which means it must have been the
3064 // leader, and it can't have stopped being the leader because it was never
3065 // removed).
3066 CongruenceClass *CC =
3067 AllEqual ? getMemoryClass(AllSameValue) : ensureLeaderOfMemoryClass(MP);
3068 auto OldState = MemoryPhiState.lookup(MP);
3069 assert(OldState != MPS_Invalid && "Invalid memory phi state");
3070 auto NewState = AllEqual ? MPS_Equivalent : MPS_Unique;
3071 MemoryPhiState[MP] = NewState;
3072 if (setMemoryClass(MP, CC) || OldState != NewState)
3073 markMemoryUsersTouched(MP);
3076 // Value number a single instruction, symbolically evaluating, performing
3077 // congruence finding, and updating mappings.
3078 void NewGVN::valueNumberInstruction(Instruction *I) {
3079 LLVM_DEBUG(dbgs() << "Processing instruction " << *I << "\n");
3080 if (!I->isTerminator()) {
3081 const Expression *Symbolized = nullptr;
3082 SmallPtrSet<Value *, 2> Visited;
3083 if (DebugCounter::shouldExecute(VNCounter)) {
3084 Symbolized = performSymbolicEvaluation(I, Visited);
3085 // Make a phi of ops if necessary
3086 if (Symbolized && !isa<ConstantExpression>(Symbolized) &&
3087 !isa<VariableExpression>(Symbolized) && PHINodeUses.count(I)) {
3088 auto *PHIE = makePossiblePHIOfOps(I, Visited);
3089 // If we created a phi of ops, use it.
3090 // If we couldn't create one, make sure we don't leave one lying around
3091 if (PHIE) {
3092 Symbolized = PHIE;
3093 } else if (auto *Op = RealToTemp.lookup(I)) {
3094 removePhiOfOps(I, Op);
3097 } else {
3098 // Mark the instruction as unused so we don't value number it again.
3099 InstrDFS[I] = 0;
3101 // If we couldn't come up with a symbolic expression, use the unknown
3102 // expression
3103 if (Symbolized == nullptr)
3104 Symbolized = createUnknownExpression(I);
3105 performCongruenceFinding(I, Symbolized);
3106 } else {
3107 // Handle terminators that return values. All of them produce values we
3108 // don't currently understand. We don't place non-value producing
3109 // terminators in a class.
3110 if (!I->getType()->isVoidTy()) {
3111 auto *Symbolized = createUnknownExpression(I);
3112 performCongruenceFinding(I, Symbolized);
3114 processOutgoingEdges(I, I->getParent());
3118 // Check if there is a path, using single or equal argument phi nodes, from
3119 // First to Second.
3120 bool NewGVN::singleReachablePHIPath(
3121 SmallPtrSet<const MemoryAccess *, 8> &Visited, const MemoryAccess *First,
3122 const MemoryAccess *Second) const {
3123 if (First == Second)
3124 return true;
3125 if (MSSA->isLiveOnEntryDef(First))
3126 return false;
3128 // This is not perfect, but as we're just verifying here, we can live with
3129 // the loss of precision. The real solution would be that of doing strongly
3130 // connected component finding in this routine, and it's probably not worth
3131 // the complexity for the time being. So, we just keep a set of visited
3132 // MemoryAccess and return true when we hit a cycle.
3133 if (Visited.count(First))
3134 return true;
3135 Visited.insert(First);
3137 const auto *EndDef = First;
3138 for (auto *ChainDef : optimized_def_chain(First)) {
3139 if (ChainDef == Second)
3140 return true;
3141 if (MSSA->isLiveOnEntryDef(ChainDef))
3142 return false;
3143 EndDef = ChainDef;
3145 auto *MP = cast<MemoryPhi>(EndDef);
3146 auto ReachableOperandPred = [&](const Use &U) {
3147 return ReachableEdges.count({MP->getIncomingBlock(U), MP->getBlock()});
3149 auto FilteredPhiArgs =
3150 make_filter_range(MP->operands(), ReachableOperandPred);
3151 SmallVector<const Value *, 32> OperandList;
3152 llvm::copy(FilteredPhiArgs, std::back_inserter(OperandList));
3153 bool Okay = is_splat(OperandList);
3154 if (Okay)
3155 return singleReachablePHIPath(Visited, cast<MemoryAccess>(OperandList[0]),
3156 Second);
3157 return false;
3160 // Verify that the memory equivalence table makes sense relative to the
3161 // congruence classes. Note that this checking is not perfect, and is currently
3162 // subject to very rare false negatives. It is only useful for
3163 // testing/debugging.
3164 void NewGVN::verifyMemoryCongruency() const {
3165 #ifndef NDEBUG
3166 // Verify that the memory table equivalence and memory member set match
3167 for (const auto *CC : CongruenceClasses) {
3168 if (CC == TOPClass || CC->isDead())
3169 continue;
3170 if (CC->getStoreCount() != 0) {
3171 assert((CC->getStoredValue() || !isa<StoreInst>(CC->getLeader())) &&
3172 "Any class with a store as a leader should have a "
3173 "representative stored value");
3174 assert(CC->getMemoryLeader() &&
3175 "Any congruence class with a store should have a "
3176 "representative access");
3179 if (CC->getMemoryLeader())
3180 assert(MemoryAccessToClass.lookup(CC->getMemoryLeader()) == CC &&
3181 "Representative MemoryAccess does not appear to be reverse "
3182 "mapped properly");
3183 for (auto M : CC->memory())
3184 assert(MemoryAccessToClass.lookup(M) == CC &&
3185 "Memory member does not appear to be reverse mapped properly");
3188 // Anything equivalent in the MemoryAccess table should be in the same
3189 // congruence class.
3191 // Filter out the unreachable and trivially dead entries, because they may
3192 // never have been updated if the instructions were not processed.
3193 auto ReachableAccessPred =
3194 [&](const std::pair<const MemoryAccess *, CongruenceClass *> Pair) {
3195 bool Result = ReachableBlocks.count(Pair.first->getBlock());
3196 if (!Result || MSSA->isLiveOnEntryDef(Pair.first) ||
3197 MemoryToDFSNum(Pair.first) == 0)
3198 return false;
3199 if (auto *MemDef = dyn_cast<MemoryDef>(Pair.first))
3200 return !isInstructionTriviallyDead(MemDef->getMemoryInst());
3202 // We could have phi nodes whose operands are all trivially dead,
3203 // so we don't process them.
3204 if (auto *MemPHI = dyn_cast<MemoryPhi>(Pair.first)) {
3205 for (auto &U : MemPHI->incoming_values()) {
3206 if (auto *I = dyn_cast<Instruction>(&*U)) {
3207 if (!isInstructionTriviallyDead(I))
3208 return true;
3211 return false;
3214 return true;
3217 auto Filtered = make_filter_range(MemoryAccessToClass, ReachableAccessPred);
3218 for (auto KV : Filtered) {
3219 if (auto *FirstMUD = dyn_cast<MemoryUseOrDef>(KV.first)) {
3220 auto *SecondMUD = dyn_cast<MemoryUseOrDef>(KV.second->getMemoryLeader());
3221 if (FirstMUD && SecondMUD) {
3222 SmallPtrSet<const MemoryAccess *, 8> VisitedMAS;
3223 assert((singleReachablePHIPath(VisitedMAS, FirstMUD, SecondMUD) ||
3224 ValueToClass.lookup(FirstMUD->getMemoryInst()) ==
3225 ValueToClass.lookup(SecondMUD->getMemoryInst())) &&
3226 "The instructions for these memory operations should have "
3227 "been in the same congruence class or reachable through "
3228 "a single argument phi");
3230 } else if (auto *FirstMP = dyn_cast<MemoryPhi>(KV.first)) {
3231 // We can only sanely verify that MemoryDefs in the operand list all have
3232 // the same class.
3233 auto ReachableOperandPred = [&](const Use &U) {
3234 return ReachableEdges.count(
3235 {FirstMP->getIncomingBlock(U), FirstMP->getBlock()}) &&
3236 isa<MemoryDef>(U);
3239 // All arguments should be in the same class, ignoring unreachable arguments.
3240 auto FilteredPhiArgs =
3241 make_filter_range(FirstMP->operands(), ReachableOperandPred);
3242 SmallVector<const CongruenceClass *, 16> PhiOpClasses;
3243 std::transform(FilteredPhiArgs.begin(), FilteredPhiArgs.end(),
3244 std::back_inserter(PhiOpClasses), [&](const Use &U) {
3245 const MemoryDef *MD = cast<MemoryDef>(U);
3246 return ValueToClass.lookup(MD->getMemoryInst());
3248 assert(is_splat(PhiOpClasses) &&
3249 "All MemoryPhi arguments should be in the same class");
3252 #endif
3255 // Verify that the sparse propagation we did actually found the maximal fixpoint.
3256 // We do this by storing the value to class mapping, touching all instructions,
3257 // and redoing the iteration to see if anything changed.
3258 void NewGVN::verifyIterationSettled(Function &F) {
3259 #ifndef NDEBUG
3260 LLVM_DEBUG(dbgs() << "Beginning iteration verification\n");
3261 if (DebugCounter::isCounterSet(VNCounter))
3262 DebugCounter::setCounterValue(VNCounter, StartingVNCounter);
3264 // Note that we have to store the actual classes, as we may change existing
3265 // classes during iteration. This is because our memory iteration propagation
3266 // is not perfect, and so may waste a little work. But it should generate
3267 // exactly the same congruence classes we have now, with different IDs.
3268 std::map<const Value *, CongruenceClass> BeforeIteration;
3270 for (auto &KV : ValueToClass) {
3271 if (auto *I = dyn_cast<Instruction>(KV.first))
3272 // Skip unused/dead instructions.
3273 if (InstrToDFSNum(I) == 0)
3274 continue;
3275 BeforeIteration.insert({KV.first, *KV.second});
3278 TouchedInstructions.set();
3279 TouchedInstructions.reset(0);
3280 iterateTouchedInstructions();
3281 DenseSet<std::pair<const CongruenceClass *, const CongruenceClass *>>
3282 EqualClasses;
3283 for (const auto &KV : ValueToClass) {
3284 if (auto *I = dyn_cast<Instruction>(KV.first))
3285 // Skip unused/dead instructions.
3286 if (InstrToDFSNum(I) == 0)
3287 continue;
3288 // We could sink these uses, but I think this adds a bit of clarity here as
3289 // to what we are comparing.
3290 auto *BeforeCC = &BeforeIteration.find(KV.first)->second;
3291 auto *AfterCC = KV.second;
3292 // Note that the classes can't change at this point, so we memoize the set
3293 // that are equal.
3294 if (!EqualClasses.count({BeforeCC, AfterCC})) {
3295 assert(BeforeCC->isEquivalentTo(AfterCC) &&
3296 "Value number changed after main loop completed!");
3297 EqualClasses.insert({BeforeCC, AfterCC});
3300 #endif
3303 // Verify that for each store expression in the expression to class mapping,
3304 // only the latest appears, and multiple ones do not appear.
3305 // Because loads do not use the stored value when doing equality with stores,
3306 // if we don't erase the old store expressions from the table, a load can find
3307 // a no-longer valid StoreExpression.
3308 void NewGVN::verifyStoreExpressions() const {
3309 #ifndef NDEBUG
3310 // This is the only use of this, and it's not worth defining a complicated
3311 // densemapinfo hash/equality function for it.
3312 std::set<
3313 std::pair<const Value *,
3314 std::tuple<const Value *, const CongruenceClass *, Value *>>>
3315 StoreExpressionSet;
3316 for (const auto &KV : ExpressionToClass) {
3317 if (auto *SE = dyn_cast<StoreExpression>(KV.first)) {
3318 // Make sure a version that will conflict with loads is not already there
3319 auto Res = StoreExpressionSet.insert(
3320 {SE->getOperand(0), std::make_tuple(SE->getMemoryLeader(), KV.second,
3321 SE->getStoredValue())});
3322 bool Okay = Res.second;
3323 // It's okay to have the same expression already in there if it is
3324 // identical in nature.
3325 // This can happen when the leader of the stored value changes over time.
3326 if (!Okay)
3327 Okay = (std::get<1>(Res.first->second) == KV.second) &&
3328 (lookupOperandLeader(std::get<2>(Res.first->second)) ==
3329 lookupOperandLeader(SE->getStoredValue()));
3330 assert(Okay && "Stored expression conflict exists in expression table");
3331 auto *ValueExpr = ValueToExpression.lookup(SE->getStoreInst());
3332 assert(ValueExpr && ValueExpr->equals(*SE) &&
3333 "StoreExpression in ExpressionToClass is not latest "
3334 "StoreExpression for value");
3337 #endif
3340 // This is the main value numbering loop, it iterates over the initial touched
3341 // instruction set, propagating value numbers, marking things touched, etc,
3342 // until the set of touched instructions is completely empty.
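// TouchedInstructions is a bitvector indexed by instruction DFS number, so
// iterating its set bits visits the touched instructions in RPO order.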
3343 void NewGVN::iterateTouchedInstructions() {
3344 unsigned int Iterations = 0;
3345 // Figure out where TouchedInstructions starts.
3346 int FirstInstr = TouchedInstructions.find_first();
3347 // Nothing set, nothing to iterate, just return.
3348 if (FirstInstr == -1)
3349 return;
3350 const BasicBlock *LastBlock = getBlockForValue(InstrFromDFSNum(FirstInstr));
3351 while (TouchedInstructions.any()) {
3352 ++Iterations;
3353 // Walk through all the instructions in all the blocks in RPO.
3354 // TODO: As we hit a new block, we should push and pop equalities into a
3355 // table lookupOperandLeader can use, to catch things PredicateInfo
3356 // might miss, like edge-only equivalences.
3357 for (unsigned InstrNum : TouchedInstructions.set_bits()) {
3359 // This instruction was found to be dead. We don't bother looking
3360 // at it again.
3361 if (InstrNum == 0) {
3362 TouchedInstructions.reset(InstrNum);
3363 continue;
3366 Value *V = InstrFromDFSNum(InstrNum);
3367 const BasicBlock *CurrBlock = getBlockForValue(V);
3369 // If we hit a new block, do reachability processing.
3370 if (CurrBlock != LastBlock) {
3371 LastBlock = CurrBlock;
3372 bool BlockReachable = ReachableBlocks.count(CurrBlock);
3373 const auto &CurrInstRange = BlockInstRange.lookup(CurrBlock);
3375 // If it's not reachable, erase any touched instructions and move on.
3376 if (!BlockReachable) {
3377 TouchedInstructions.reset(CurrInstRange.first, CurrInstRange.second);
3378 LLVM_DEBUG(dbgs() << "Skipping instructions in block "
3379 << getBlockName(CurrBlock)
3380 << " because it is unreachable\n");
3381 continue;
3383 updateProcessedCount(CurrBlock);
3385 // Reset after processing (because we may mark ourselves as touched when
3386 // we propagate equalities).
3387 TouchedInstructions.reset(InstrNum);
3389 if (auto *MP = dyn_cast<MemoryPhi>(V)) {
3390 LLVM_DEBUG(dbgs() << "Processing MemoryPhi " << *MP << "\n");
3391 valueNumberMemoryPhi(MP);
3392 } else if (auto *I = dyn_cast<Instruction>(V)) {
3393 valueNumberInstruction(I);
3394 } else {
3395 llvm_unreachable("Should have been a MemoryPhi or Instruction");
3397 updateProcessedCount(V);
3400 NumGVNMaxIterations = std::max(NumGVNMaxIterations.getValue(), Iterations);
3403 // This is the main transformation entry point.
3404 bool NewGVN::runGVN() {
3405 if (DebugCounter::isCounterSet(VNCounter))
3406 StartingVNCounter = DebugCounter::getCounterValue(VNCounter);
3407 bool Changed = false;
3408 NumFuncArgs = F.arg_size();
3409 MSSAWalker = MSSA->getWalker();
3410 SingletonDeadExpression = new (ExpressionAllocator) DeadExpression();
3412 // Count number of instructions for sizing of hash tables, and come
3413 // up with a global dfs numbering for instructions.
3414 unsigned ICount = 1;
3415 // Add an empty instruction to account for the fact that we start at 1
3416 DFSToInstr.emplace_back(nullptr);
3417 // Note: We want ideal RPO traversal of the blocks, which is not quite the
3418 // same as dominator tree order, particularly with regard to whether backedges
3419 // get visited first or second, given a block with multiple successors.
3420 // If we visit in the wrong order, we will end up performing N times as many
3421 // iterations.
3422 // The dominator tree does guarantee that, for a given dom tree node, its
3423 // parent must occur before it in the RPO ordering. Thus, we only need to sort
3424 // the siblings.
3425 ReversePostOrderTraversal<Function *> RPOT(&F);
3426 unsigned Counter = 0;
3427 for (auto &B : RPOT) {
3428 auto *Node = DT->getNode(B);
3429 assert(Node && "RPO and Dominator tree should have same reachability");
3430 RPOOrdering[Node] = ++Counter;
3432 // Sort dominator tree children arrays into RPO.
3433 for (auto &B : RPOT) {
3434 auto *Node = DT->getNode(B);
3435 if (Node->getChildren().size() > 1)
3436 llvm::sort(Node->begin(), Node->end(),
3437 [&](const DomTreeNode *A, const DomTreeNode *B) {
3438 return RPOOrdering[A] < RPOOrdering[B];
3442 // Now a standard depth first ordering of the domtree is equivalent to RPO.
3443 for (auto DTN : depth_first(DT->getRootNode())) {
3444 BasicBlock *B = DTN->getBlock();
3445 const auto &BlockRange = assignDFSNumbers(B, ICount);
3446 BlockInstRange.insert({B, BlockRange});
3447 ICount += BlockRange.second - BlockRange.first;
3449 initializeCongruenceClasses(F);
3451 TouchedInstructions.resize(ICount);
3452 // Ensure we don't end up resizing the expressionToClass map, as
3453 // that can be quite expensive. At most, we have one expression per
3454 // instruction.
3455 ExpressionToClass.reserve(ICount);
3457 // Initialize the touched instructions to include the entry block.
3458 const auto &InstRange = BlockInstRange.lookup(&F.getEntryBlock());
3459 TouchedInstructions.set(InstRange.first, InstRange.second);
3460 LLVM_DEBUG(dbgs() << "Block " << getBlockName(&F.getEntryBlock())
3461 << " marked reachable\n");
3462 ReachableBlocks.insert(&F.getEntryBlock());
3464 iterateTouchedInstructions();
3465 verifyMemoryCongruency();
3466 verifyIterationSettled(F);
3467 verifyStoreExpressions();
3469 Changed |= eliminateInstructions(F);
3471 // Delete all instructions marked for deletion.
3472 for (Instruction *ToErase : InstructionsToErase) {
3473 if (!ToErase->use_empty())
3474 ToErase->replaceAllUsesWith(UndefValue::get(ToErase->getType()));
3476 assert(ToErase->getParent() &&
3477 "BB containing ToErase deleted unexpectedly!");
3478 ToErase->eraseFromParent();
3480 Changed |= !InstructionsToErase.empty();
3482 // Delete all unreachable blocks.
3483 auto UnreachableBlockPred = [&](const BasicBlock &BB) {
3484 return !ReachableBlocks.count(&BB);
3487 for (auto &BB : make_filter_range(F, UnreachableBlockPred)) {
3488 LLVM_DEBUG(dbgs() << "We believe block " << getBlockName(&BB)
3489 << " is unreachable\n");
3490 deleteInstructionsInBlock(&BB);
3491 Changed = true;
3494 cleanupTables();
3495 return Changed;
3498 struct NewGVN::ValueDFS {
3499 int DFSIn = 0;
3500 int DFSOut = 0;
3501 int LocalNum = 0;
3503 // Only one of Def and U will be set.
3504 // The bool in the Def tells us whether the Def is the stored value of a
3505 // store.
3506 PointerIntPair<Value *, 1, bool> Def;
3507 Use *U = nullptr;
3509 bool operator<(const ValueDFS &Other) const {
3510 // It's not enough that any given field be less than - we have sets
3511 // of fields that need to be evaluated together to give a proper ordering.
3512 // For example, if you have:
3513 // DFS (1, 3)
3514 // Val 0
3515 // DFS (1, 2)
3516 // Val 50
3517 // We want the second to be less than the first, but if we just go field
3518 // by field, we will get to Val 0 < Val 50 and say the first is less than
3519 // the second. We only want it to be less than if the DFS orders are equal.
3521 // Each LLVM instruction only produces one value, and thus the lowest-level
3522 // differentiator that really matters for the stack (and what we use as a
3523 // replacement) is the local dfs number.
3524 // Everything else in the structure is instruction level, and only affects
3525 // the order in which we will replace operands of a given instruction.
3527 // For a given instruction (IE things with equal dfsin, dfsout, localnum),
3528 // the order of replacement of uses does not matter.
3529 // IE given,
3530 // a = 5
3531 // b = a + a
3532 // When you hit b, you will have two valuedfs with the same dfsin, out, and
3533 // localnum.
3534 // The .val will be the same as well.
3535 // The .u's will be different.
3536 // You will replace both, and it does not matter what order you replace them
3537 // in (IE whether you replace operand 2, then operand 1, or operand 1, then
3538 // operand 2).
3539 // Similarly for the case of same dfsin, dfsout, localnum, but different
3540 // .val's
3541 // a = 5
3542 // b = 6
3543 // c = a + b
3544 // in c, we will have a ValueDFS for a, and one for b, with everything the same
3545 // but .val and .u.
3546 // It does not matter what order we replace these operands in.
3547 // You will always end up with the same IR, and this is guaranteed.
3548 return std::tie(DFSIn, DFSOut, LocalNum, Def, U) <
3549 std::tie(Other.DFSIn, Other.DFSOut, Other.LocalNum, Other.Def,
3550 Other.U);
3554 // This function converts the set of members for a congruence class from
3555 // values to sets of defs and uses with associated DFS info. The total number
3556 // of reachable uses for each value is stored in UseCounts, and instructions
3557 // that seem dead (have no non-dead uses) are stored in ProbablyDead.
3559 void NewGVN::convertClassToDFSOrdered(
3560 const CongruenceClass &Dense, SmallVectorImpl<ValueDFS> &DFSOrderedSet,
3561 DenseMap<const Value *, unsigned int> &UseCounts,
3562 SmallPtrSetImpl<Instruction *> &ProbablyDead) const {
3563 for (auto D : Dense) {
3564 // First add the value.
3565 BasicBlock *BB = getBlockForValue(D);
3566 // Constants are handled prior to ever calling this function, so
3567 // we should only be left with instructions as members.
3568 assert(BB && "Should have figured out a basic block for value");
3569 ValueDFS VDDef;
3570 DomTreeNode *DomNode = DT->getNode(BB);
3571 VDDef.DFSIn = DomNode->getDFSNumIn();
3572 VDDef.DFSOut = DomNode->getDFSNumOut();
3573 // If it's a store, use the leader of the value operand, if it's always
3574 // available, or the value operand. TODO: We could do dominance checks to
3575 // find a dominating leader, but not worth it ATM.
3576 if (auto *SI = dyn_cast<StoreInst>(D)) {
3577 auto Leader = lookupOperandLeader(SI->getValueOperand());
3578 if (alwaysAvailable(Leader)) {
3579 VDDef.Def.setPointer(Leader);
3580 } else {
3581 VDDef.Def.setPointer(SI->getValueOperand());
3582 VDDef.Def.setInt(true);
3584 } else {
3585 VDDef.Def.setPointer(D);
3587 assert(isa<Instruction>(D) &&
3588 "The dense set member should always be an instruction");
3589 Instruction *Def = cast<Instruction>(D);
3590 VDDef.LocalNum = InstrToDFSNum(D);
3591 DFSOrderedSet.push_back(VDDef);
3592 // If there is a phi node equivalent, add it
3593 if (auto *PN = RealToTemp.lookup(Def)) {
3594 auto *PHIE =
3595 dyn_cast_or_null<PHIExpression>(ValueToExpression.lookup(Def));
3596 if (PHIE) {
3597 VDDef.Def.setInt(false);
3598 VDDef.Def.setPointer(PN);
3599 VDDef.LocalNum = 0;
3600 DFSOrderedSet.push_back(VDDef);
3604 unsigned int UseCount = 0;
3605 // Now add the uses.
3606 for (auto &U : Def->uses()) {
3607 if (auto *I = dyn_cast<Instruction>(U.getUser())) {
3608 // Don't try to replace into dead uses
3609 if (InstructionsToErase.count(I))
3610 continue;
3611 ValueDFS VDUse;
3612 // Put the phi node uses in the incoming block.
3613 BasicBlock *IBlock;
3614 if (auto *P = dyn_cast<PHINode>(I)) {
3615 IBlock = P->getIncomingBlock(U);
3616 // Make phi node users appear last in the incoming block
3617 // they are from.
3618 VDUse.LocalNum = InstrDFS.size() + 1;
3619 } else {
3620 IBlock = getBlockForValue(I);
3621 VDUse.LocalNum = InstrToDFSNum(I);
3624 // Skip uses in unreachable blocks, as we're going
3625 // to delete them.
3626 if (ReachableBlocks.count(IBlock) == 0)
3627 continue;
3629 DomTreeNode *DomNode = DT->getNode(IBlock);
3630 VDUse.DFSIn = DomNode->getDFSNumIn();
3631 VDUse.DFSOut = DomNode->getDFSNumOut();
3632 VDUse.U = &U;
3633 ++UseCount;
3634 DFSOrderedSet.emplace_back(VDUse);
3638 // If there are no uses, it's probably dead (but it may have side-effects,
3639 // so it is not definitely dead). Otherwise, store the number of uses so we
3640 // can track whether it becomes dead later.
3641 if (UseCount == 0)
3642 ProbablyDead.insert(Def);
3643 else
3644 UseCounts[Def] = UseCount;
3648 // This function converts the set of members for a congruence class from
3649 // values to the set of defs for loads and stores, with associated DFS info.
3650 void NewGVN::convertClassToLoadsAndStores(
3651 const CongruenceClass &Dense,
3652 SmallVectorImpl<ValueDFS> &LoadsAndStores) const {
3653 for (auto D : Dense) {
3654 if (!isa<LoadInst>(D) && !isa<StoreInst>(D))
3655 continue;
3657 BasicBlock *BB = getBlockForValue(D);
3658 ValueDFS VD;
3659 DomTreeNode *DomNode = DT->getNode(BB);
3660 VD.DFSIn = DomNode->getDFSNumIn();
3661 VD.DFSOut = DomNode->getDFSNumOut();
3662 VD.Def.setPointer(D);
3664 // If it's an instruction, use the real local dfs number.
3665 if (auto *I = dyn_cast<Instruction>(D))
3666 VD.LocalNum = InstrToDFSNum(I);
3667 else
3668 llvm_unreachable("Should have been an instruction");
3670 LoadsAndStores.emplace_back(VD);
3674 static void patchAndReplaceAllUsesWith(Instruction *I, Value *Repl) {
3675 patchReplacementInstruction(I, Repl);
3676 I->replaceAllUsesWith(Repl);
3679 void NewGVN::deleteInstructionsInBlock(BasicBlock *BB) {
3680 LLVM_DEBUG(dbgs() << " BasicBlock Dead:" << *BB);
3681 ++NumGVNBlocksDeleted;
3683 // Delete the instructions backwards, as doing so reduces the number of
3684 // def-use and use-def chains we have to update. Start after the terminator.
3685 auto StartPoint = BB->rbegin();
3686 ++StartPoint;
3687 // Note that we explicitly recalculate BB->rend() on each iteration,
3688 // as it may change when we remove the first instruction.
3689 for (BasicBlock::reverse_iterator I(StartPoint); I != BB->rend();) {
3690 Instruction &Inst = *I++;
3691 if (!Inst.use_empty())
3692 Inst.replaceAllUsesWith(UndefValue::get(Inst.getType()));
3693 if (isa<LandingPadInst>(Inst))
3694 continue;
3696 Inst.eraseFromParent();
3697 ++NumGVNInstrDeleted;
3699 // Now insert something that simplifycfg will turn into an unreachable.
3700 Type *Int8Ty = Type::getInt8Ty(BB->getContext());
3701 new StoreInst(UndefValue::get(Int8Ty),
3702 Constant::getNullValue(Int8Ty->getPointerTo()),
3703 BB->getTerminator());
3706 void NewGVN::markInstructionForDeletion(Instruction *I) {
3707 LLVM_DEBUG(dbgs() << "Marking " << *I << " for deletion\n");
3708 InstructionsToErase.insert(I);
3711 void NewGVN::replaceInstruction(Instruction *I, Value *V) {
3712 LLVM_DEBUG(dbgs() << "Replacing " << *I << " with " << *V << "\n");
3713 patchAndReplaceAllUsesWith(I, V);
3714 // We save the actual erasing to avoid invalidating memory
3715 // dependencies until we are done with everything.
3716 markInstructionForDeletion(I);
3719 namespace {
3721 // This is a stack that contains both the value and dfs info of where
3722 // that value is valid.
3723 class ValueDFSStack {
3724 public:
3725 Value *back() const { return ValueStack.back(); }
3726 std::pair<int, int> dfs_back() const { return DFSStack.back(); }
3728 void push_back(Value *V, int DFSIn, int DFSOut) {
3729 ValueStack.emplace_back(V);
3730 DFSStack.emplace_back(DFSIn, DFSOut);
3733 bool empty() const { return DFSStack.empty(); }
3735 bool isInScope(int DFSIn, int DFSOut) const {
3736 if (empty())
3737 return false;
3738 return DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second;
3741 void popUntilDFSScope(int DFSIn, int DFSOut) {
3743 // These two should always be in sync at this point.
3744 assert(ValueStack.size() == DFSStack.size() &&
3745 "Mismatch between ValueStack and DFSStack");
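// Pop entries whose (DFSIn, DFSOut) interval does not contain the query
// point, i.e. values that no longer dominate the current position.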
3746 while (
3747 !DFSStack.empty() &&
3748 !(DFSIn >= DFSStack.back().first && DFSOut <= DFSStack.back().second)) {
3749 DFSStack.pop_back();
3750 ValueStack.pop_back();
3754 private:
3755 SmallVector<Value *, 8> ValueStack;
3756 SmallVector<std::pair<int, int>, 8> DFSStack;
3759 } // end anonymous namespace
3761 // Given an expression, get the congruence class for it.
3762 CongruenceClass *NewGVN::getClassForExpression(const Expression *E) const {
3763 if (auto *VE = dyn_cast<VariableExpression>(E))
3764 return ValueToClass.lookup(VE->getVariableValue());
3765 else if (isa<DeadExpression>(E))
3766 return TOPClass;
3767 return ExpressionToClass.lookup(E);
3770 // Given an expression and a basic block we are trying to see if it is
3771 // available in, see if the expression has a leader available in that block.
3772 Value *NewGVN::findPHIOfOpsLeader(const Expression *E,
3773 const Instruction *OrigInst,
3774 const BasicBlock *BB) const {
3775 // It would already be constant if we could make it constant
3776 if (auto *CE = dyn_cast<ConstantExpression>(E))
3777 return CE->getConstantValue();
3778 if (auto *VE = dyn_cast<VariableExpression>(E)) {
3779 auto *V = VE->getVariableValue();
3780 if (alwaysAvailable(V) || DT->dominates(getBlockForValue(V), BB))
3781 return VE->getVariableValue();
3784 auto *CC = getClassForExpression(E);
3785 if (!CC)
3786 return nullptr;
3787 if (alwaysAvailable(CC->getLeader()))
3788 return CC->getLeader();
3790 for (auto Member : *CC) {
3791 auto *MemberInst = dyn_cast<Instruction>(Member);
3792 if (MemberInst == OrigInst)
3793 continue;
3794 // Anything that isn't an instruction is always available.
3795 if (!MemberInst)
3796 return Member;
3797 if (DT->dominates(getBlockForValue(MemberInst), BB))
3798 return Member;
3800 return nullptr;
3803 bool NewGVN::eliminateInstructions(Function &F) {
3804 // This is a non-standard eliminator. The normal way to eliminate is
3805 // to walk the dominator tree in order, keeping track of available
3806 // values, and eliminating them. However, this is mildly
3807 // pointless. It requires doing lookups on every instruction,
3808 // regardless of whether we will ever eliminate it. For
3809 // instructions part of most singleton congruence classes, we know we
3810 // will never eliminate them.
3812 // Instead, this eliminator looks at the congruence classes directly, sorts
3813 // them into a DFS ordering of the dominator tree, and then we just
3814 // perform elimination straight on the sets by walking the congruence
3815 // class member uses in order, and eliminate the ones dominated by the
3816 // last member. This is worst case O(E log E) where E = number of
3817 // instructions in a single congruence class. In theory, this is all
3818 // instructions. In practice, it is much faster, as most instructions are
3819 // either in singleton congruence classes or can't possibly be eliminated
3820 // anyway (if there are no overlapping DFS ranges in class).
3821 // When we find something not dominated, it becomes the new leader
3822 // for elimination purposes.
3823 // TODO: If we wanted to be faster, we could remove any members with no
3824 // overlapping ranges while sorting, as we will never eliminate anything
3825 // with those members, as they don't dominate anything else in our set.
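// Dominance here is decided purely from dominator-tree DFS intervals:
// A dominates B iff DFSIn(A) <= DFSIn(B) and DFSOut(A) >= DFSOut(B), which is
// exactly what ValueDFSStack::isInScope checks as we sweep the sorted sets.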
3827 bool AnythingReplaced = false;
3829 // Since we are going to walk the domtree anyway, and we can't guarantee the
3830 // DFS numbers are updated, we compute some ourselves.
3831 DT->updateDFSNumbers();
3833 // Go through all of our phi nodes, and kill the arguments associated with
3834 // unreachable edges.
3835 auto ReplaceUnreachablePHIArgs = [&](PHINode *PHI, BasicBlock *BB) {
3836 for (auto &Operand : PHI->incoming_values())
3837 if (!ReachableEdges.count({PHI->getIncomingBlock(Operand), BB})) {
3838 LLVM_DEBUG(dbgs() << "Replacing incoming value of " << PHI
3839 << " for block "
3840 << getBlockName(PHI->getIncomingBlock(Operand))
3841 << " with undef due to it being unreachable\n");
3842 Operand.set(UndefValue::get(PHI->getType()));
3845 // Replace unreachable phi arguments.
3846 // At this point, RevisitOnReachabilityChange only contains:
3848 // 1. PHIs
3849 // 2. Temporaries that will convert to PHIs
3850 // 3. Operations that are affected by an unreachable edge but do not fit into
3851 // 1 or 2 (rare).
3852 // So it is a slight overshoot of what we want. We could make it exact by
3853 // using two SparseBitVectors per block.
3854 DenseMap<const BasicBlock *, unsigned> ReachablePredCount;
3855 for (auto &KV : ReachableEdges)
3856 ReachablePredCount[KV.getEnd()]++;
3857 for (auto &BBPair : RevisitOnReachabilityChange) {
3858 for (auto InstNum : BBPair.second) {
3859 auto *Inst = InstrFromDFSNum(InstNum);
3860 auto *PHI = dyn_cast<PHINode>(Inst);
3861 PHI = PHI ? PHI : dyn_cast_or_null<PHINode>(RealToTemp.lookup(Inst));
3862 if (!PHI)
3863 continue;
3864 auto *BB = BBPair.first;
3865 if (ReachablePredCount.lookup(BB) != PHI->getNumIncomingValues())
3866 ReplaceUnreachablePHIArgs(PHI, BB);
3870 // Map to store the use counts
3871 DenseMap<const Value *, unsigned int> UseCounts;
3872 for (auto *CC : reverse(CongruenceClasses)) {
3873 LLVM_DEBUG(dbgs() << "Eliminating in congruence class " << CC->getID()
3874 << "\n");
3875 // Track the equivalent store info so we can decide whether to try
3876 // dead store elimination.
3877 SmallVector<ValueDFS, 8> PossibleDeadStores;
3878 SmallPtrSet<Instruction *, 8> ProbablyDead;
3879 if (CC->isDead() || CC->empty())
3880 continue;
3881 // Everything still in the TOP class is unreachable or dead.
3882 if (CC == TOPClass) {
3883 for (auto M : *CC) {
3884 auto *VTE = ValueToExpression.lookup(M);
3885 if (VTE && isa<DeadExpression>(VTE))
3886 markInstructionForDeletion(cast<Instruction>(M));
3887 assert((!ReachableBlocks.count(cast<Instruction>(M)->getParent()) ||
3888 InstructionsToErase.count(cast<Instruction>(M))) &&
3889 "Everything in TOP should be unreachable or dead at this "
3890 "point");
3892 continue;
3895 assert(CC->getLeader() && "We should have had a leader");
3896 // If this is a leader that is always available, and it's a
3897 // constant or has no equivalences, just replace everything with
3898 // it. We then update the congruence class with whatever members
3899 // are left.
3900 Value *Leader =
3901 CC->getStoredValue() ? CC->getStoredValue() : CC->getLeader();
3902 if (alwaysAvailable(Leader)) {
3903 CongruenceClass::MemberSet MembersLeft;
3904 for (auto M : *CC) {
3905 Value *Member = M;
3906 // Void things have no uses we can replace.
3907 if (Member == Leader || !isa<Instruction>(Member) ||
3908 Member->getType()->isVoidTy()) {
3909 MembersLeft.insert(Member);
3910 continue;
3912 LLVM_DEBUG(dbgs() << "Found replacement " << *(Leader) << " for "
3913 << *Member << "\n");
3914 auto *I = cast<Instruction>(Member);
3915 assert(Leader != I && "About to accidentally remove our leader");
3916 replaceInstruction(I, Leader);
3917 AnythingReplaced = true;
3919 CC->swap(MembersLeft);
3920 } else {
3921 // If this is a singleton, we can skip it.
3922 if (CC->size() != 1 || RealToTemp.count(Leader)) {
3923 // This is a stack because equality replacement/etc may place
3924 // constants in the middle of the member list, and we want to use
3925 // those constant values in preference to the current leader, over
3926 // the scope of those constants.
3927 ValueDFSStack EliminationStack;
3929 // Convert the members to DFS ordered sets and then merge them.
3930 SmallVector<ValueDFS, 8> DFSOrderedSet;
3931 convertClassToDFSOrdered(*CC, DFSOrderedSet, UseCounts, ProbablyDead);
3933 // Sort the whole thing.
3934 llvm::sort(DFSOrderedSet);
3935 for (auto &VD : DFSOrderedSet) {
3936 int MemberDFSIn = VD.DFSIn;
3937 int MemberDFSOut = VD.DFSOut;
3938 Value *Def = VD.Def.getPointer();
3939 bool FromStore = VD.Def.getInt();
3940 Use *U = VD.U;
3941 // We ignore void things because we can't get a value from them.
3942 if (Def && Def->getType()->isVoidTy())
3943 continue;
3944 auto *DefInst = dyn_cast_or_null<Instruction>(Def);
3945 if (DefInst && AllTempInstructions.count(DefInst)) {
3946 auto *PN = cast<PHINode>(DefInst);
3948 // If this is a value phi and that's the expression we used, insert it
3949 // into the program and remove it from the temp instruction list.
3951 AllTempInstructions.erase(PN);
3952 auto *DefBlock = getBlockForValue(Def);
3953 LLVM_DEBUG(dbgs() << "Inserting fully real phi of ops" << *Def
3954 << " into block "
3955 << getBlockName(getBlockForValue(Def)) << "\n");
3956 PN->insertBefore(&DefBlock->front());
3957 Def = PN;
3958 NumGVNPHIOfOpsEliminations++;
3961 if (EliminationStack.empty()) {
3962 LLVM_DEBUG(dbgs() << "Elimination Stack is empty\n");
3963 } else {
3964 LLVM_DEBUG(dbgs() << "Elimination Stack Top DFS numbers are ("
3965 << EliminationStack.dfs_back().first << ","
3966 << EliminationStack.dfs_back().second << ")\n");
3969 LLVM_DEBUG(dbgs() << "Current DFS numbers are (" << MemberDFSIn << ","
3970 << MemberDFSOut << ")\n");
3971 // First, we see if we are out of scope or empty. If so,
3972 // and there are equivalences, we try to replace the top of
3973 // stack with equivalences (if it's on the stack, it must
3974 // not have been eliminated yet).
3975 // Then we synchronize to our current scope, by
3976 // popping until we are back within a DFS scope that
3977 // dominates the current member.
3978 // Then, what happens depends on a few factors
3979 // If the stack is now empty, we need to push.
3980 // If we have a constant or a local equivalence we want to
3981 // start using, we also push.
3982 // Otherwise, we walk along, processing members that are
3983 // dominated by this scope, and eliminate them.
3984 bool ShouldPush = Def && EliminationStack.empty();
3985 bool OutOfScope =
3986 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut);
3988 if (OutOfScope || ShouldPush) {
3989 // Sync to our current scope.
3990 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
3991 bool ShouldPush = Def && EliminationStack.empty();
3992 if (ShouldPush) {
3993 EliminationStack.push_back(Def, MemberDFSIn, MemberDFSOut);
3997 // Skip the Defs; we only want to eliminate on their uses. But mark
3998 // dominated defs as dead.
3999 if (Def) {
4000 // For anything in this case, what and how we value number
4001 // guarantees that any side-effects that would have occurred (ie
4002 // throwing, etc) can be proven to either still occur (because it's
4003 // dominated by something that has the same side-effects), or never
4004 // occur. Otherwise, we would not have been able to prove it value
4005 // equivalent to something else. For these things, we can just mark
4006 // it all dead. Note that this is different from the "ProbablyDead"
4007 // set, which may not be dominated by anything, and thus, are only
4008 // easy to prove dead if they are also side-effect free. Note that
4009 // because stores are put in terms of the stored value, we skip
4010 // stored values here. If the stored value is really dead, it will
4011 // still be marked for deletion when we process it in its own class.
4012 if (!EliminationStack.empty() && Def != EliminationStack.back() &&
4013 isa<Instruction>(Def) && !FromStore)
4014 markInstructionForDeletion(cast<Instruction>(Def));
4015 continue;
4017 // At this point, we know it is a Use we are trying to possibly
4018 // replace.
4020 assert(isa<Instruction>(U->get()) &&
4021 "Current def should have been an instruction");
4022 assert(isa<Instruction>(U->getUser()) &&
4023 "Current user should have been an instruction");
4025 // If the thing we are replacing into is already marked to be dead,
4026 // this use is dead. Note that this is true regardless of whether
4027 // we have anything dominating the use or not. We do this here
4028 // because we are already walking all the uses anyway.
4029 Instruction *InstUse = cast<Instruction>(U->getUser());
4030 if (InstructionsToErase.count(InstUse)) {
4031 auto &UseCount = UseCounts[U->get()];
4032 if (--UseCount == 0) {
4033 ProbablyDead.insert(cast<Instruction>(U->get()));
4037 // If we get to this point, and the stack is empty we must have a use
4038 // with nothing we can use to eliminate this use, so just skip it.
4039 if (EliminationStack.empty())
4040 continue;
4042 Value *DominatingLeader = EliminationStack.back();
4044 auto *II = dyn_cast<IntrinsicInst>(DominatingLeader);
4045 bool isSSACopy = II && II->getIntrinsicID() == Intrinsic::ssa_copy;
4046 if (isSSACopy)
4047 DominatingLeader = II->getOperand(0);
4049 // Don't replace our existing users with ourselves.
4050 if (U->get() == DominatingLeader)
4051 continue;
4052 LLVM_DEBUG(dbgs()
4053 << "Found replacement " << *DominatingLeader << " for "
4054 << *U->get() << " in " << *(U->getUser()) << "\n");
4056 // If we replaced something in an instruction, handle the patching of
4057 // metadata. Skip this if we are replacing predicateinfo with its
4058 // original operand, as we already know we can just drop it.
4059 auto *ReplacedInst = cast<Instruction>(U->get());
4060 auto *PI = PredInfo->getPredicateInfoFor(ReplacedInst);
4061 if (!PI || DominatingLeader != PI->OriginalOp)
4062 patchReplacementInstruction(ReplacedInst, DominatingLeader);
4063 U->set(DominatingLeader);
4064 // This is now a use of the dominating leader, which means if the
4065 // dominating leader was dead, it's now live!
4066 auto &LeaderUseCount = UseCounts[DominatingLeader];
4067 // It's about to be alive again.
4068 if (LeaderUseCount == 0 && isa<Instruction>(DominatingLeader))
4069 ProbablyDead.erase(cast<Instruction>(DominatingLeader));
4070 // For copy instructions, we use their operand as a leader,
4071 // which means we remove a user of the copy and it may become dead.
4072 if (isSSACopy) {
4073 unsigned &IIUseCount = UseCounts[II];
4074 if (--IIUseCount == 0)
4075 ProbablyDead.insert(II);
4077 ++LeaderUseCount;
4078 AnythingReplaced = true;
4083 // At this point, anything still in the ProbablyDead set is actually dead
4084 // if it would be trivially dead.
4085 for (auto *I : ProbablyDead)
4086 if (wouldInstructionBeTriviallyDead(I))
4087 markInstructionForDeletion(I);
4089 // Cleanup the congruence class.
4090 CongruenceClass::MemberSet MembersLeft;
4091 for (auto *Member : *CC)
4092 if (!isa<Instruction>(Member) ||
4093 !InstructionsToErase.count(cast<Instruction>(Member)))
4094 MembersLeft.insert(Member);
4095 CC->swap(MembersLeft);
4097 // If we have possible dead stores to look at, try to eliminate them.
4098 if (CC->getStoreCount() > 0) {
4099 convertClassToLoadsAndStores(*CC, PossibleDeadStores);
4100 llvm::sort(PossibleDeadStores);
4101 ValueDFSStack EliminationStack;
4102 for (auto &VD : PossibleDeadStores) {
4103 int MemberDFSIn = VD.DFSIn;
4104 int MemberDFSOut = VD.DFSOut;
4105 Instruction *Member = cast<Instruction>(VD.Def.getPointer());
4106 if (EliminationStack.empty() ||
4107 !EliminationStack.isInScope(MemberDFSIn, MemberDFSOut)) {
4108 // Sync to our current scope.
4109 EliminationStack.popUntilDFSScope(MemberDFSIn, MemberDFSOut);
4110 if (EliminationStack.empty()) {
4111 EliminationStack.push_back(Member, MemberDFSIn, MemberDFSOut);
4112 continue;
4115 // We already did load elimination, so nothing to do here.
4116 if (isa<LoadInst>(Member))
4117 continue;
4118 assert(!EliminationStack.empty());
4119 Instruction *Leader = cast<Instruction>(EliminationStack.back());
4120 (void)Leader;
4121 assert(DT->dominates(Leader->getParent(), Member->getParent()));
4122 // Member is dominated by Leader, and thus dead.
4123 LLVM_DEBUG(dbgs() << "Marking dead store " << *Member
4124 << " that is dominated by " << *Leader << "\n");
4125 markInstructionForDeletion(Member);
4126 CC->erase(Member);
4127 ++NumGVNDeadStores;
4131 return AnythingReplaced;
4134 // This function provides global ranking of operations so that we can place them
4135 // in a canonical order. Note that rank alone is not necessarily enough for a
4136 // complete ordering, as constants all have the same rank. However, generally,
4137 // we will simplify an operation with all constants so that it doesn't matter
4138 // what order they appear in.
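// The resulting order is: plain constants (rank 0), then undef (1), then
// constant expressions (2), then arguments (3 + argument number), then
// everything else by instruction DFS number.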
4139 unsigned int NewGVN::getRank(const Value *V) const {
4140 // Prefer constants to undef to anything else
4141 // Undef is a constant, have to check it first.
4142 // Prefer smaller constants to constantexprs
4143 if (isa<ConstantExpr>(V))
4144 return 2;
4145 if (isa<UndefValue>(V))
4146 return 1;
4147 if (isa<Constant>(V))
4148 return 0;
4149 else if (auto *A = dyn_cast<Argument>(V))
4150 return 3 + A->getArgNo();
4152 // Need to shift the instruction DFS by number of arguments + 3 to account for
4153 // the constant and argument ranking above.
4154 unsigned Result = InstrToDFSNum(V);
4155 if (Result > 0)
4156 return 4 + NumFuncArgs + Result;
4157 // Unreachable or something else, just return a really large number.
4158 return ~0;
4161 // This is a function that says whether two commutative operations should
4162 // have their order swapped when canonicalizing.
4163 bool NewGVN::shouldSwapOperands(const Value *A, const Value *B) const {
4164 // Because we only care about a total ordering, and don't rewrite expressions
4165 // in this order, we order by rank, which will give a strict weak ordering to
4166 // everything but constants, and then we order by pointer address.
4167 return std::make_pair(getRank(A), A) > std::make_pair(getRank(B), B);
4170 namespace {
4172 class NewGVNLegacyPass : public FunctionPass {
4173 public:
4174 // Pass identification, replacement for typeid.
4175 static char ID;
4177 NewGVNLegacyPass() : FunctionPass(ID) {
4178 initializeNewGVNLegacyPassPass(*PassRegistry::getPassRegistry());
4181 bool runOnFunction(Function &F) override;
4183 private:
4184 void getAnalysisUsage(AnalysisUsage &AU) const override {
4185 AU.addRequired<AssumptionCacheTracker>();
4186 AU.addRequired<DominatorTreeWrapperPass>();
4187 AU.addRequired<TargetLibraryInfoWrapperPass>();
4188 AU.addRequired<MemorySSAWrapperPass>();
4189 AU.addRequired<AAResultsWrapperPass>();
4190 AU.addPreserved<DominatorTreeWrapperPass>();
4191 AU.addPreserved<GlobalsAAWrapperPass>();
4195 } // end anonymous namespace
4197 bool NewGVNLegacyPass::runOnFunction(Function &F) {
4198 if (skipFunction(F))
4199 return false;
4200 return NewGVN(F, &getAnalysis<DominatorTreeWrapperPass>().getDomTree(),
4201 &getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F),
4202 &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
4203 &getAnalysis<AAResultsWrapperPass>().getAAResults(),
4204 &getAnalysis<MemorySSAWrapperPass>().getMSSA(),
4205 F.getParent()->getDataLayout())
4206 .runGVN();
4209 char NewGVNLegacyPass::ID = 0;
4211 INITIALIZE_PASS_BEGIN(NewGVNLegacyPass, "newgvn", "Global Value Numbering",
4212 false, false)
4213 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
4214 INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
4215 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
4216 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
4217 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
4218 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
4219 INITIALIZE_PASS_END(NewGVNLegacyPass, "newgvn", "Global Value Numbering", false,
4220 false)
4222 // createNewGVNPass - The public interface to this file.
4223 FunctionPass *llvm::createNewGVNPass() { return new NewGVNLegacyPass(); }
4225 PreservedAnalyses NewGVNPass::run(Function &F, AnalysisManager<Function> &AM) {
4226 // Apparently the order in which we get these results matters for
4227 // the old GVN (see Chandler's comment in GVN.cpp). I'll keep
4228 // the same order here, just in case.
4229 auto &AC = AM.getResult<AssumptionAnalysis>(F);
4230 auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
4231 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
4232 auto &AA = AM.getResult<AAManager>(F);
4233 auto &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
4234 bool Changed =
4235 NewGVN(F, &DT, &AC, &TLI, &AA, &MSSA, F.getParent()->getDataLayout())
4236 .runGVN();
4237 if (!Changed)
4238 return PreservedAnalyses::all();
4239 PreservedAnalyses PA;
4240 PA.preserve<DominatorTreeAnalysis>();
4241 PA.preserve<GlobalsAA>();
4242 return PA;