//===- EarlyCSE.cpp - Simple and fast CSE pass ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs a simple dominator tree walk that eliminates trivially
// redundant instructions.
//
//===----------------------------------------------------------------------===//
14 #include "llvm/Transforms/Scalar/EarlyCSE.h"
15 #include "llvm/ADT/DenseMapInfo.h"
16 #include "llvm/ADT/Hashing.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/ScopedHashTable.h"
19 #include "llvm/ADT/SetVector.h"
20 #include "llvm/ADT/SmallVector.h"
21 #include "llvm/ADT/Statistic.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/GlobalsModRef.h"
24 #include "llvm/Analysis/GuardUtils.h"
25 #include "llvm/Analysis/InstructionSimplify.h"
26 #include "llvm/Analysis/MemorySSA.h"
27 #include "llvm/Analysis/MemorySSAUpdater.h"
28 #include "llvm/Analysis/TargetLibraryInfo.h"
29 #include "llvm/Analysis/TargetTransformInfo.h"
30 #include "llvm/Transforms/Utils/Local.h"
31 #include "llvm/Analysis/ValueTracking.h"
32 #include "llvm/IR/BasicBlock.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/InstrTypes.h"
38 #include "llvm/IR/Instruction.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/IntrinsicInst.h"
41 #include "llvm/IR/Intrinsics.h"
42 #include "llvm/IR/LLVMContext.h"
43 #include "llvm/IR/PassManager.h"
44 #include "llvm/IR/PatternMatch.h"
45 #include "llvm/IR/Type.h"
46 #include "llvm/IR/Use.h"
47 #include "llvm/IR/Value.h"
48 #include "llvm/Pass.h"
49 #include "llvm/Support/Allocator.h"
50 #include "llvm/Support/AtomicOrdering.h"
51 #include "llvm/Support/Casting.h"
52 #include "llvm/Support/Debug.h"
53 #include "llvm/Support/DebugCounter.h"
54 #include "llvm/Support/RecyclingAllocator.h"
55 #include "llvm/Support/raw_ostream.h"
56 #include "llvm/Transforms/Scalar.h"
57 #include "llvm/Transforms/Utils/GuardUtils.h"
#include <cassert>
#include <deque>
#include <memory>
#include <utility>

using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "early-cse"
STATISTIC(NumSimplify, "Number of instructions simplified or DCE'd");
STATISTIC(NumCSE, "Number of instructions CSE'd");
STATISTIC(NumCSECVP, "Number of compare instructions CVP'd");
STATISTIC(NumCSELoad, "Number of load instructions CSE'd");
STATISTIC(NumCSECall, "Number of call instructions CSE'd");
STATISTIC(NumDSE, "Number of trivial dead stores removed");

DEBUG_COUNTER(CSECounter, "early-cse",
              "Controls which instructions are removed");
static cl::opt<unsigned> EarlyCSEMssaOptCap(
    "earlycse-mssa-optimization-cap", cl::init(500), cl::Hidden,
    cl::desc("Enable imprecision in EarlyCSE in pathological cases, in exchange "
             "for faster compile. Caps the MemorySSA clobbering calls."));

static cl::opt<bool> EarlyCSEDebugHash(
    "earlycse-debug-hash", cl::init(false), cl::Hidden,
    cl::desc("Perform extra assertion checking to verify that SimpleValue's hash "
             "function is well-behaved w.r.t. its isEqual predicate"));
//===----------------------------------------------------------------------===//
// SimpleValue
//===----------------------------------------------------------------------===//
namespace {

/// Struct representing the available values in the scoped hash table.
struct SimpleValue {
  Instruction *Inst;

  SimpleValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // This can only handle non-void readnone functions.
    if (CallInst *CI = dyn_cast<CallInst>(Inst))
      return CI->doesNotAccessMemory() && !CI->getType()->isVoidTy();
    return isa<CastInst>(Inst) || isa<UnaryOperator>(Inst) ||
           isa<BinaryOperator>(Inst) || isa<GetElementPtrInst>(Inst) ||
           isa<CmpInst>(Inst) || isa<SelectInst>(Inst) ||
           isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
           isa<ShuffleVectorInst>(Inst) || isa<ExtractValueInst>(Inst) ||
           isa<InsertValueInst>(Inst);
  }
};

} // end anonymous namespace
namespace llvm {

template <> struct DenseMapInfo<SimpleValue> {
  static inline SimpleValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline SimpleValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(SimpleValue Val);
  static bool isEqual(SimpleValue LHS, SimpleValue RHS);
};

} // end namespace llvm
/// Match a 'select' including an optional 'not' of the condition.
static bool matchSelectWithOptionalNotCond(Value *V, Value *&Cond, Value *&A,
                                           Value *&B,
                                           SelectPatternFlavor &Flavor) {
  // Return false if V is not even a select.
  if (!match(V, m_Select(m_Value(Cond), m_Value(A), m_Value(B))))
    return false;

  // Look through a 'not' of the condition operand by swapping A/B.
  Value *CondNot;
  if (match(Cond, m_Not(m_Value(CondNot)))) {
    Cond = CondNot;
    std::swap(A, B);
  }

  // Set flavor if we find a match, or set it to unknown otherwise; in
  // either case, return true to indicate that this is a select we can
  // process.
  if (auto *CmpI = dyn_cast<ICmpInst>(Cond))
    Flavor = matchDecomposedSelectPattern(CmpI, A, B, A, B).Flavor;
  else
    Flavor = SPF_UNKNOWN;

  return true;
}
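
// Illustrative example (not part of the original source): given
//   %notc = xor i1 %c, true
//   %sel  = select i1 %notc, i32 %a, i32 %b
// the matcher above looks through the 'not' and reports Cond = %c with A/B
// swapped, i.e. the equivalent of 'select i1 %c, i32 %b, i32 %a'.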
static unsigned getHashValueImpl(SimpleValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash in all of the operands as pointers.
  if (BinaryOperator *BinOp = dyn_cast<BinaryOperator>(Inst)) {
    Value *LHS = BinOp->getOperand(0);
    Value *RHS = BinOp->getOperand(1);
    if (BinOp->isCommutative() && BinOp->getOperand(0) > BinOp->getOperand(1))
      std::swap(LHS, RHS);

    return hash_combine(BinOp->getOpcode(), LHS, RHS);
  }
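
  // Illustrative example (not part of the original source): because the
  // operands of a commutative operation are sorted by pointer value before
  // hashing, these two instructions hash identically:
  //   %x = add i32 %a, %b
  //   %y = add i32 %b, %a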
  if (CmpInst *CI = dyn_cast<CmpInst>(Inst)) {
    // Compares can be commuted by swapping the comparands and
    // updating the predicate. Choose the form that has the
    // comparands in sorted order, or in the case of a tie, the
    // one with the lower predicate.
    Value *LHS = CI->getOperand(0);
    Value *RHS = CI->getOperand(1);
    CmpInst::Predicate Pred = CI->getPredicate();
    CmpInst::Predicate SwappedPred = CI->getSwappedPredicate();
    if (std::tie(LHS, Pred) > std::tie(RHS, SwappedPred)) {
      std::swap(LHS, RHS);
      Pred = SwappedPred;
    }
    return hash_combine(Inst->getOpcode(), Pred, LHS, RHS);
  }
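
  // Illustrative example (not part of the original source): sorting the
  // comparands and swapping the predicate to match gives these two compares
  // the same hash:
  //   %c1 = icmp slt i32 %a, %b
  //   %c2 = icmp sgt i32 %b, %a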
  // Hash general selects to allow matching commuted true/false operands.
  SelectPatternFlavor SPF;
  Value *Cond, *A, *B;
  if (matchSelectWithOptionalNotCond(Inst, Cond, A, B, SPF)) {
    // Hash min/max/abs (cmp + select) to allow for commuted operands.
    // Min/max may also have non-canonical compare predicate (e.g., the compare
    // for smin may use 'sgt' rather than 'slt'), and non-canonical operands in
    // the compare.
    // TODO: We should also detect FP min/max.
    if (SPF == SPF_SMIN || SPF == SPF_SMAX ||
        SPF == SPF_UMIN || SPF == SPF_UMAX) {
      if (A > B)
        std::swap(A, B);
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }
    if (SPF == SPF_ABS || SPF == SPF_NABS) {
      // ABS/NABS always puts the input in A and its negation in B.
      return hash_combine(Inst->getOpcode(), SPF, A, B);
    }

    // If we do not have a compare as the condition, just hash in the condition.
    CmpInst::Predicate Pred;
    Value *X, *Y;
    if (!match(Cond, m_Cmp(Pred, m_Value(X), m_Value(Y))))
      return hash_combine(Inst->getOpcode(), Cond, A, B);

    // Similar to cmp normalization (above) - canonicalize the predicate value:
    // select (icmp Pred, X, Y), A, B --> select (icmp InvPred, X, Y), B, A
    if (CmpInst::getInversePredicate(Pred) < Pred) {
      Pred = CmpInst::getInversePredicate(Pred);
      std::swap(A, B);
    }
    return hash_combine(Inst->getOpcode(), Pred, X, Y, A, B);
  }
  if (CastInst *CI = dyn_cast<CastInst>(Inst))
    return hash_combine(CI->getOpcode(), CI->getType(), CI->getOperand(0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(Inst))
    return hash_combine(EVI->getOpcode(), EVI->getOperand(0),
                        hash_combine_range(EVI->idx_begin(), EVI->idx_end()));

  if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(Inst))
    return hash_combine(IVI->getOpcode(), IVI->getOperand(0),
                        IVI->getOperand(1),
                        hash_combine_range(IVI->idx_begin(), IVI->idx_end()));

  assert((isa<CallInst>(Inst) || isa<GetElementPtrInst>(Inst) ||
          isa<ExtractElementInst>(Inst) || isa<InsertElementInst>(Inst) ||
          isa<ShuffleVectorInst>(Inst) || isa<UnaryOperator>(Inst)) &&
         "Invalid/unknown instruction");

  // Mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}
unsigned DenseMapInfo<SimpleValue>::getHashValue(SimpleValue Val) {
#ifndef NDEBUG
  // If -earlycse-debug-hash was specified, return a constant -- this
  // will force all hashing to collide, so we'll exhaustively search
  // the table for a match, and the assertion in isEqual will fire if
  // there's a bug causing equal keys to hash differently.
  if (EarlyCSEDebugHash)
    return 0;
#endif
  return getHashValueImpl(Val);
}
static bool isEqualImpl(SimpleValue LHS, SimpleValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;

  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;

  if (LHSI->getOpcode() != RHSI->getOpcode())
    return false;
  if (LHSI->isIdenticalToWhenDefined(RHSI))
    return true;

  // If we're not strictly identical, we still might be a commutable instruction
  if (BinaryOperator *LHSBinOp = dyn_cast<BinaryOperator>(LHSI)) {
    if (!LHSBinOp->isCommutative())
      return false;

    assert(isa<BinaryOperator>(RHSI) &&
           "same opcode, but different instruction type?");
    BinaryOperator *RHSBinOp = cast<BinaryOperator>(RHSI);

    // Commuted equality
    return LHSBinOp->getOperand(0) == RHSBinOp->getOperand(1) &&
           LHSBinOp->getOperand(1) == RHSBinOp->getOperand(0);
  }
  if (CmpInst *LHSCmp = dyn_cast<CmpInst>(LHSI)) {
    assert(isa<CmpInst>(RHSI) &&
           "same opcode, but different instruction type?");
    CmpInst *RHSCmp = cast<CmpInst>(RHSI);
    // Commuted equality
    return LHSCmp->getOperand(0) == RHSCmp->getOperand(1) &&
           LHSCmp->getOperand(1) == RHSCmp->getOperand(0) &&
           LHSCmp->getSwappedPredicate() == RHSCmp->getPredicate();
  }

  // Min/max/abs can occur with commuted operands, non-canonical predicates,
  // and/or non-canonical operands.
  // Selects can be non-trivially equivalent via inverted conditions and swaps.
  SelectPatternFlavor LSPF, RSPF;
  Value *CondL, *CondR, *LHSA, *RHSA, *LHSB, *RHSB;
  if (matchSelectWithOptionalNotCond(LHSI, CondL, LHSA, LHSB, LSPF) &&
      matchSelectWithOptionalNotCond(RHSI, CondR, RHSA, RHSB, RSPF)) {
    if (LSPF == RSPF) {
      // TODO: We should also detect FP min/max.
      if (LSPF == SPF_SMIN || LSPF == SPF_SMAX ||
          LSPF == SPF_UMIN || LSPF == SPF_UMAX)
        return ((LHSA == RHSA && LHSB == RHSB) ||
                (LHSA == RHSB && LHSB == RHSA));

      if (LSPF == SPF_ABS || LSPF == SPF_NABS) {
        // Abs results are placed in a defined order by matchSelectPattern.
        return LHSA == RHSA && LHSB == RHSB;
      }

      // select Cond, A, B <--> select not(Cond), B, A
      if (CondL == CondR && LHSA == RHSA && LHSB == RHSB)
        return true;
    }

    // If the true/false operands are swapped and the conditions are compares
    // with inverted predicates, the selects are equal:
    // select (icmp Pred, X, Y), A, B <--> select (icmp InvPred, X, Y), B, A
    //
    // This also handles patterns with a double-negation in the sense of not +
    // inverse, because we looked through a 'not' in the matching function and
    // canonicalized A/B:
    // select (cmp Pred, X, Y), A, B <--> select (not (cmp InvPred, X, Y)), B, A
    //
    // This intentionally does NOT handle patterns with a double-negation in
    // the sense of not + not, because doing so could result in values comparing
    // as equal that hash differently in the min/max/abs cases like:
    // select (cmp slt, X, Y), X, Y <--> select (not (not (cmp slt, X, Y))), X, Y
    //   ^ hashes as min                  ^ would not hash as min
    // In the context of the EarlyCSE pass, however, such cases never reach
    // this code, as we simplify the double-negation before hashing the second
    // select (and so still succeed at CSEing them).
    if (LHSA == RHSB && LHSB == RHSA) {
      CmpInst::Predicate PredL, PredR;
      Value *X, *Y;
      if (match(CondL, m_Cmp(PredL, m_Value(X), m_Value(Y))) &&
          match(CondR, m_Cmp(PredR, m_Specific(X), m_Specific(Y))) &&
          CmpInst::getInversePredicate(PredL) == PredR)
        return true;
    }
  }

  return false;
}
bool DenseMapInfo<SimpleValue>::isEqual(SimpleValue LHS, SimpleValue RHS) {
  // These comparisons are nontrivial, so assert that equality implies
  // hash equality (DenseMap demands this as an invariant).
  bool Result = isEqualImpl(LHS, RHS);
  assert(!Result || (LHS.isSentinel() && LHS.Inst == RHS.Inst) ||
         getHashValueImpl(LHS) == getHashValueImpl(RHS));
  return Result;
}
//===----------------------------------------------------------------------===//
// CallValue
//===----------------------------------------------------------------------===//
namespace {

/// Struct representing the available call values in the scoped hash
/// table.
struct CallValue {
  Instruction *Inst;

  CallValue(Instruction *I) : Inst(I) {
    assert((isSentinel() || canHandle(I)) && "Inst can't be handled!");
  }

  bool isSentinel() const {
    return Inst == DenseMapInfo<Instruction *>::getEmptyKey() ||
           Inst == DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static bool canHandle(Instruction *Inst) {
    // Don't value number anything that returns void.
    if (Inst->getType()->isVoidTy())
      return false;

    CallInst *CI = dyn_cast<CallInst>(Inst);
    if (!CI || !CI->onlyReadsMemory())
      return false;
    return true;
  }
};

} // end anonymous namespace
namespace llvm {

template <> struct DenseMapInfo<CallValue> {
  static inline CallValue getEmptyKey() {
    return DenseMapInfo<Instruction *>::getEmptyKey();
  }

  static inline CallValue getTombstoneKey() {
    return DenseMapInfo<Instruction *>::getTombstoneKey();
  }

  static unsigned getHashValue(CallValue Val);
  static bool isEqual(CallValue LHS, CallValue RHS);
};

} // end namespace llvm
unsigned DenseMapInfo<CallValue>::getHashValue(CallValue Val) {
  Instruction *Inst = Val.Inst;
  // Hash all of the operands as pointers and mix in the opcode.
  return hash_combine(
      Inst->getOpcode(),
      hash_combine_range(Inst->value_op_begin(), Inst->value_op_end()));
}

bool DenseMapInfo<CallValue>::isEqual(CallValue LHS, CallValue RHS) {
  Instruction *LHSI = LHS.Inst, *RHSI = RHS.Inst;
  if (LHS.isSentinel() || RHS.isSentinel())
    return LHSI == RHSI;
  return LHSI->isIdenticalTo(RHSI);
}
//===----------------------------------------------------------------------===//
// EarlyCSE implementation
//===----------------------------------------------------------------------===//
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
class EarlyCSE {
public:
  const TargetLibraryInfo &TLI;
  const TargetTransformInfo &TTI;
  DominatorTree &DT;
  AssumptionCache &AC;
  const SimplifyQuery SQ;
  MemorySSA *MSSA;
  std::unique_ptr<MemorySSAUpdater> MSSAUpdater;

  using AllocatorTy =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<SimpleValue, Value *>>;
  using ScopedHTType =
      ScopedHashTable<SimpleValue, Value *, DenseMapInfo<SimpleValue>,
                      AllocatorTy>;

  /// A scoped hash table of the current values of all of our simple
  /// scalar expressions.
  ///
  /// As we walk down the domtree, we look to see if instructions are in this:
  /// if so, we replace them with what we find, otherwise we insert them so
  /// that dominated values can succeed in their lookup.
  ScopedHTType AvailableValues;

  /// A scoped hash table of the current values of previously encountered
  /// memory locations.
  ///
  /// This allows us to get efficient access to dominating loads or stores when
  /// we have a fully redundant load. In addition to the most recent load, we
  /// keep track of a generation count of the read, which is compared against
  /// the current generation count. The current generation count is incremented
  /// after every possibly writing memory operation, which ensures that we only
  /// CSE loads with other loads that have no intervening store. Ordering
  /// events (such as fences or atomic instructions) increment the generation
  /// count as well; essentially, we model these as writes to all possible
  /// locations. Note that atomic and/or volatile loads and stores can be
  /// present in the table; it is the responsibility of the consumer to inspect
  /// the atomicity/volatility if needed.
  struct LoadValue {
    Instruction *DefInst = nullptr;
    unsigned Generation = 0;
    int MatchingId = -1;
    bool IsAtomic = false;

    LoadValue() = default;
    LoadValue(Instruction *Inst, unsigned Generation, unsigned MatchingId,
              bool IsAtomic)
        : DefInst(Inst), Generation(Generation), MatchingId(MatchingId),
          IsAtomic(IsAtomic) {}
  };

  using LoadMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<Value *, LoadValue>>;
  using LoadHTType =
      ScopedHashTable<Value *, LoadValue, DenseMapInfo<Value *>,
                      LoadMapAllocator>;

  LoadHTType AvailableLoads;

  // A scoped hash table mapping memory locations (represented as typed
  // addresses) to generation numbers at which that memory location became
  // (henceforth indefinitely) invariant.
  using InvariantMapAllocator =
      RecyclingAllocator<BumpPtrAllocator,
                         ScopedHashTableVal<MemoryLocation, unsigned>>;
  using InvariantHTType =
      ScopedHashTable<MemoryLocation, unsigned, DenseMapInfo<MemoryLocation>,
                      InvariantMapAllocator>;
  InvariantHTType AvailableInvariants;

  /// A scoped hash table of the current values of read-only call
  /// values.
  ///
  /// It uses the same generation count as loads.
  using CallHTType =
      ScopedHashTable<CallValue, std::pair<Instruction *, unsigned>>;
  CallHTType AvailableCalls;

  /// This is the current generation of the memory value.
  unsigned CurrentGeneration = 0;
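
  // Illustrative example (not part of the original source) of how the
  // generation count gates load CSE:
  //   %v1 = load i32, i32* %p    ; recorded at generation G
  //   call void @f()             ; may write memory -> generation becomes G+1
  //   %v2 = load i32, i32* %p    ; generation mismatch: not CSE'd from %v1
  //                              ; (unless MemorySSA proves no clobber)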
  /// Set up the EarlyCSE runner for a particular function.
  EarlyCSE(const DataLayout &DL, const TargetLibraryInfo &TLI,
           const TargetTransformInfo &TTI, DominatorTree &DT,
           AssumptionCache &AC, MemorySSA *MSSA)
      : TLI(TLI), TTI(TTI), DT(DT), AC(AC), SQ(DL, &TLI, &DT, &AC), MSSA(MSSA),
        MSSAUpdater(std::make_unique<MemorySSAUpdater>(MSSA)) {}

  bool run();

private:
  unsigned ClobberCounter = 0;

  // Almost a POD, but needs to call the constructors for the scoped hash
  // tables so that a new scope gets pushed on. These are RAII so that the
  // scope gets popped when the NodeScope is destroyed.
  class NodeScope {
  public:
    NodeScope(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls)
        : Scope(AvailableValues), LoadScope(AvailableLoads),
          InvariantScope(AvailableInvariants), CallScope(AvailableCalls) {}
    NodeScope(const NodeScope &) = delete;
    NodeScope &operator=(const NodeScope &) = delete;

  private:
    ScopedHTType::ScopeTy Scope;
    LoadHTType::ScopeTy LoadScope;
    InvariantHTType::ScopeTy InvariantScope;
    CallHTType::ScopeTy CallScope;
  };
  // Contains all the needed information to create a stack for doing a depth
  // first traversal of the tree. This includes scopes for values, loads, and
  // calls as well as the generation. There is a child iterator so that the
  // children do not need to be stored separately.
  class StackNode {
  public:
    StackNode(ScopedHTType &AvailableValues, LoadHTType &AvailableLoads,
              InvariantHTType &AvailableInvariants, CallHTType &AvailableCalls,
              unsigned cg, DomTreeNode *n, DomTreeNode::iterator child,
              DomTreeNode::iterator end)
        : CurrentGeneration(cg), ChildGeneration(cg), Node(n), ChildIter(child),
          EndIter(end),
          Scopes(AvailableValues, AvailableLoads, AvailableInvariants,
                 AvailableCalls) {}
    StackNode(const StackNode &) = delete;
    StackNode &operator=(const StackNode &) = delete;

    // Accessors.
    unsigned currentGeneration() { return CurrentGeneration; }
    unsigned childGeneration() { return ChildGeneration; }
    void childGeneration(unsigned generation) { ChildGeneration = generation; }
    DomTreeNode *node() { return Node; }
    DomTreeNode::iterator childIter() { return ChildIter; }

    DomTreeNode *nextChild() {
      DomTreeNode *child = *ChildIter;
      ++ChildIter;
      return child;
    }

    DomTreeNode::iterator end() { return EndIter; }
    bool isProcessed() { return Processed; }
    void process() { Processed = true; }

  private:
    unsigned CurrentGeneration;
    unsigned ChildGeneration;
    DomTreeNode *Node;
    DomTreeNode::iterator ChildIter;
    DomTreeNode::iterator EndIter;
    NodeScope Scopes;
    bool Processed = false;
  };
  /// Wrapper class to handle memory instructions, including loads,
  /// stores and intrinsic loads and stores defined by the target.
  class ParseMemoryInst {
  public:
    ParseMemoryInst(Instruction *Inst, const TargetTransformInfo &TTI)
      : Inst(Inst) {
      if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst))
        if (TTI.getTgtMemIntrinsic(II, Info))
          IsTargetMemInst = true;
    }

    bool isLoad() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return isa<LoadInst>(Inst);
    }

    bool isStore() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return isa<StoreInst>(Inst);
    }

    bool isAtomic() const {
      if (IsTargetMemInst)
        return Info.Ordering != AtomicOrdering::NotAtomic;
      return Inst->isAtomic();
    }

    bool isUnordered() const {
      if (IsTargetMemInst)
        return Info.isUnordered();

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isUnordered();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isUnordered();
      }
      // Conservative answer
      return !Inst->isAtomic();
    }

    bool isVolatile() const {
      if (IsTargetMemInst)
        return Info.IsVolatile;

      if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
        return LI->isVolatile();
      } else if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
        return SI->isVolatile();
      }
      // Conservative answer
      return true;
    }

    bool isInvariantLoad() const {
      if (auto *LI = dyn_cast<LoadInst>(Inst))
        return LI->hasMetadata(LLVMContext::MD_invariant_load);
      return false;
    }

    bool isMatchingMemLoc(const ParseMemoryInst &Inst) const {
      return (getPointerOperand() == Inst.getPointerOperand() &&
              getMatchingId() == Inst.getMatchingId());
    }

    bool isValid() const { return getPointerOperand() != nullptr; }

    // For regular (non-intrinsic) loads/stores, this is set to -1. For
    // intrinsic loads/stores, the id is retrieved from the corresponding
    // field in the MemIntrinsicInfo structure. That field contains
    // non-negative values only.
    int getMatchingId() const {
      if (IsTargetMemInst) return Info.MatchingId;
      return -1;
    }

    Value *getPointerOperand() const {
      if (IsTargetMemInst) return Info.PtrVal;
      return getLoadStorePointerOperand(Inst);
    }

    bool mayReadFromMemory() const {
      if (IsTargetMemInst) return Info.ReadMem;
      return Inst->mayReadFromMemory();
    }

    bool mayWriteToMemory() const {
      if (IsTargetMemInst) return Info.WriteMem;
      return Inst->mayWriteToMemory();
    }

  private:
    bool IsTargetMemInst = false;
    MemIntrinsicInfo Info;
    Instruction *Inst;
  };
  bool processNode(DomTreeNode *Node);

  bool handleBranchCondition(Instruction *CondInst, const BranchInst *BI,
                             const BasicBlock *BB, const BasicBlock *Pred);

  Value *getOrCreateResult(Value *Inst, Type *ExpectedType) const {
    if (auto *LI = dyn_cast<LoadInst>(Inst))
      return LI;
    if (auto *SI = dyn_cast<StoreInst>(Inst))
      return SI->getValueOperand();
    assert(isa<IntrinsicInst>(Inst) && "Instruction not supported");
    return TTI.getOrCreateResultFromMemIntrinsic(cast<IntrinsicInst>(Inst),
                                                 ExpectedType);
  }

  /// Return true if the instruction is known to only operate on memory
  /// provably invariant in the given "generation".
  bool isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt);

  bool isSameMemGeneration(unsigned EarlierGeneration, unsigned LaterGeneration,
                           Instruction *EarlierInst, Instruction *LaterInst);

  void removeMSSA(Instruction *Inst) {
    if (!MSSA)
      return;
    if (VerifyMemorySSA)
      MSSA->verifyMemorySSA();
    // Removing a store here can leave MemorySSA in an unoptimized state by
    // creating MemoryPhis that have identical arguments and by creating
    // MemoryUses whose defining access is not an actual clobber. The phi case
    // is handled by MemorySSA when passing OptimizePhis = true to
    // removeMemoryAccess. The non-optimized MemoryUse case is lazily updated
    // by MemorySSA's getClobberingMemoryAccess.
    MSSAUpdater->removeMemoryAccess(Inst, true);
  }
};

} // end anonymous namespace
/// Determine if the memory referenced by LaterInst is from the same heap
/// version as EarlierInst.
/// This is currently called in two scenarios:
///
///   1. load p
///      ...
///      load p
///
/// and
///
///   2. load p
///      ...
///      store p
///
/// in both cases we want to verify that there are no possible writes to the
/// memory referenced by p between the earlier and later instruction.
bool EarlyCSE::isSameMemGeneration(unsigned EarlierGeneration,
                                   unsigned LaterGeneration,
                                   Instruction *EarlierInst,
                                   Instruction *LaterInst) {
  // Check the simple memory generation tracking first.
  if (EarlierGeneration == LaterGeneration)
    return true;

  if (!MSSA)
    return false;

  // If MemorySSA has determined that one of EarlierInst or LaterInst does not
  // read/write memory, then we can safely return true here.
  // FIXME: We could be more aggressive when checking doesNotAccessMemory(),
  // onlyReadsMemory(), mayReadFromMemory(), and mayWriteToMemory() in this pass
  // by also checking the MemorySSA MemoryAccess on the instruction. Initial
  // experiments suggest this isn't worthwhile, at least for C/C++ code compiled
  // with the default optimization pipeline.
  auto *EarlierMA = MSSA->getMemoryAccess(EarlierInst);
  if (!EarlierMA)
    return true;
  auto *LaterMA = MSSA->getMemoryAccess(LaterInst);
  if (!LaterMA)
    return true;

  // Since we know LaterDef dominates LaterInst and EarlierInst dominates
  // LaterInst, if LaterDef dominates EarlierInst then it can't occur between
  // EarlierInst and LaterInst and neither can any other write that potentially
  // clobbers LaterInst.
  MemoryAccess *LaterDef;
  if (ClobberCounter < EarlyCSEMssaOptCap) {
    LaterDef = MSSA->getWalker()->getClobberingMemoryAccess(LaterInst);
    ClobberCounter++;
  } else
    LaterDef = LaterMA->getDefiningAccess();

  return MSSA->dominates(LaterDef, EarlierMA);
}
bool EarlyCSE::isOperatingOnInvariantMemAt(Instruction *I, unsigned GenAt) {
  // A location loaded from with an invariant_load is assumed to *never* change
  // within the visible scope of the compilation.
  if (auto *LI = dyn_cast<LoadInst>(I))
    if (LI->hasMetadata(LLVMContext::MD_invariant_load))
      return true;

  auto MemLocOpt = MemoryLocation::getOrNone(I);
  if (!MemLocOpt)
    // "target" intrinsic forms of loads aren't currently known to
    // MemoryLocation::get. TODO
    return false;
  MemoryLocation MemLoc = *MemLocOpt;
  if (!AvailableInvariants.count(MemLoc))
    return false;

  // Is the generation at which this became invariant older than the
  // current one?
  return AvailableInvariants.lookup(MemLoc) <= GenAt;
}
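
// Illustrative example (not part of the original source): once
//   call {}* @llvm.invariant.start.p0i8(i64 4, i8* %p)
// starts a scope at generation G, any access to %p at a generation >= G is
// treated as operating on invariant memory, so the usual generation check
// can be bypassed for it.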
bool EarlyCSE::handleBranchCondition(Instruction *CondInst,
                                     const BranchInst *BI, const BasicBlock *BB,
                                     const BasicBlock *Pred) {
  assert(BI->isConditional() && "Should be a conditional branch!");
  assert(BI->getCondition() == CondInst && "Wrong condition?");
  assert(BI->getSuccessor(0) == BB || BI->getSuccessor(1) == BB);
  auto *TorF = (BI->getSuccessor(0) == BB)
                   ? ConstantInt::getTrue(BB->getContext())
                   : ConstantInt::getFalse(BB->getContext());
  auto MatchBinOp = [](Instruction *I, unsigned Opcode) {
    if (BinaryOperator *BOp = dyn_cast<BinaryOperator>(I))
      return BOp->getOpcode() == Opcode;
    return false;
  };
  // If the condition is an AND operation, we can propagate its operands into
  // the true branch. If it is an OR operation, we can propagate them into the
  // false branch.
  unsigned PropagateOpcode =
      (BI->getSuccessor(0) == BB) ? Instruction::And : Instruction::Or;

  bool MadeChanges = false;
  SmallVector<Instruction *, 4> WorkList;
  SmallPtrSet<Instruction *, 4> Visited;
  WorkList.push_back(CondInst);
  while (!WorkList.empty()) {
    Instruction *Curr = WorkList.pop_back_val();

    AvailableValues.insert(Curr, TorF);
    LLVM_DEBUG(dbgs() << "EarlyCSE CVP: Add conditional value for '"
                      << Curr->getName() << "' as " << *TorF << " in "
                      << BB->getName() << "\n");
    if (!DebugCounter::shouldExecute(CSECounter)) {
      LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
    } else {
      // Replace all dominated uses with the known value.
      if (unsigned Count = replaceDominatedUsesWith(Curr, TorF, DT,
                                                    BasicBlockEdge(Pred, BB))) {
        NumCSECVP += Count;
        MadeChanges = true;
      }
    }

    if (MatchBinOp(Curr, PropagateOpcode))
      for (auto &Op : cast<BinaryOperator>(Curr)->operands())
        if (Instruction *OPI = dyn_cast<Instruction>(Op))
          if (SimpleValue::canHandle(OPI) && Visited.insert(OPI).second)
            WorkList.push_back(OPI);
  }

  return MadeChanges;
}
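
// Illustrative example (not part of the original source): given
//   %c = and i1 %a, %b
//   br i1 %c, label %taken, label %other
// in %taken the loop above records %c, %a, and %b as true and rewrites their
// dominated uses; 'and' operands are only propagated into the true successor
// (and 'or' operands only into the false successor).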
bool EarlyCSE::processNode(DomTreeNode *Node) {
  bool Changed = false;
  BasicBlock *BB = Node->getBlock();

  // If this block has a single predecessor, then the predecessor is the parent
  // of the domtree node and all of the live out memory values are still current
  // in this block. If this block has multiple predecessors, then they could
  // have invalidated the live-out memory values of our parent value. For now,
  // just be conservative and invalidate memory if this block has multiple
  // predecessors.
  if (!BB->getSinglePredecessor())
    ++CurrentGeneration;

  // If this node has a single predecessor which ends in a conditional branch,
  // we can infer the value of the branch condition given that we took this
  // path. We need the single predecessor to ensure there's not another path
  // which reaches this block where the condition might hold a different
  // value. Since we're adding this to the scoped hash table (like any other
  // def), it will have been popped if we encounter a future merge block.
  if (BasicBlock *Pred = BB->getSinglePredecessor()) {
    auto *BI = dyn_cast<BranchInst>(Pred->getTerminator());
    if (BI && BI->isConditional()) {
      auto *CondInst = dyn_cast<Instruction>(BI->getCondition());
      if (CondInst && SimpleValue::canHandle(CondInst))
        Changed |= handleBranchCondition(CondInst, BI, BB, Pred);
    }
  }

  /// LastStore - Keep track of the last non-volatile store that we saw... for
  /// as long as there is no instruction that reads memory. If we see a store
  /// to the same location, we delete the dead store. This zaps trivial dead
  /// stores which can occur in bitfield code among other things.
  Instruction *LastStore = nullptr;
  // See if any instructions in the block can be eliminated. If so, do it. If
  // not, add them to AvailableValues.
  for (BasicBlock::iterator I = BB->begin(), E = BB->end(); I != E;) {
    Instruction *Inst = &*I++;

    // Dead instructions should just be removed.
    if (isInstructionTriviallyDead(Inst, &TLI)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE DCE: " << *Inst << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
        continue;
      }

      if (!salvageDebugInfo(*Inst))
        replaceDbgUsesWithUndef(Inst);
      removeMSSA(Inst);
      Inst->eraseFromParent();
      Changed = true;
      ++NumSimplify;
      continue;
    }
    // Skip assume intrinsics, they don't really have side effects (although
    // they're marked as such to ensure preservation of control dependencies),
    // and this pass will not bother with their removal. However, we should mark
    // their condition as true for all dominated blocks.
    if (match(Inst, m_Intrinsic<Intrinsic::assume>())) {
      auto *CondI =
          dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0));
      if (CondI && SimpleValue::canHandle(CondI)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE considering assumption: " << *Inst
                          << '\n');
        AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
      } else
        LLVM_DEBUG(dbgs() << "EarlyCSE skipping assumption: " << *Inst << '\n');
      continue;
    }

    // Skip sideeffect intrinsics, for the same reason as assume intrinsics.
    if (match(Inst, m_Intrinsic<Intrinsic::sideeffect>())) {
      LLVM_DEBUG(dbgs() << "EarlyCSE skipping sideeffect: " << *Inst << '\n');
      continue;
    }
    // We can skip all invariant.start intrinsics since they only read memory,
    // and we can forward values across them. For invariant starts without
    // invariant ends, we can use the fact that the invariantness never ends to
    // start a scope in the current generation which is true for all future
    // generations. Also, we don't need to consume the last store since the
    // semantics of invariant.start allow us to perform DSE of the last
    // store, if there was a store following invariant.start. Consider:
    //  store 30, i8* p
    //  invariant.start(p)
    //  store 40, i8* p
    // We can DSE the store to 30, since the store 40 to invariant location p
    // causes undefined behaviour.
    if (match(Inst, m_Intrinsic<Intrinsic::invariant_start>())) {
      // If there are any uses, the scope might end.
      if (!Inst->use_empty())
        continue;
      auto *CI = cast<CallInst>(Inst);
      MemoryLocation MemLoc = MemoryLocation::getForArgument(CI, 1, TLI);
      // Don't start a scope if we already have a better one pushed.
      if (!AvailableInvariants.count(MemLoc))
        AvailableInvariants.insert(MemLoc, CurrentGeneration);
      continue;
    }
    if (isGuard(Inst)) {
      if (auto *CondI =
              dyn_cast<Instruction>(cast<CallInst>(Inst)->getArgOperand(0))) {
        if (SimpleValue::canHandle(CondI)) {
          // Do we already know the actual value of this condition?
          if (auto *KnownCond = AvailableValues.lookup(CondI)) {
            // Is the condition known to be true?
            if (isa<ConstantInt>(KnownCond) &&
                cast<ConstantInt>(KnownCond)->isOne()) {
              LLVM_DEBUG(dbgs()
                         << "EarlyCSE removing guard: " << *Inst << '\n');
              removeMSSA(Inst);
              Inst->eraseFromParent();
              Changed = true;
              continue;
            } else
              // Use the known value if it wasn't true.
              cast<CallInst>(Inst)->setArgOperand(0, KnownCond);
          }
          // The condition we're guarding on here is true for all dominated
          // blocks.
          AvailableValues.insert(CondI, ConstantInt::getTrue(BB->getContext()));
        }
      }

      // Guard intrinsics read all memory, but don't write any memory.
      // Accordingly, don't update the generation but consume the last store (to
      // avoid an incorrect DSE).
      LastStore = nullptr;
      continue;
    }
    // If the instruction can be simplified (e.g. X+0 = X) then replace it with
    // its simpler value.
    if (Value *V = SimplifyInstruction(Inst, SQ)) {
      LLVM_DEBUG(dbgs() << "EarlyCSE Simplify: " << *Inst << "  to: " << *V
                        << '\n');
      if (!DebugCounter::shouldExecute(CSECounter)) {
        LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
      } else {
        bool Killed = false;
        if (!Inst->use_empty()) {
          Inst->replaceAllUsesWith(V);
          Changed = true;
        }
        if (isInstructionTriviallyDead(Inst, &TLI)) {
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          Killed = true;
        }
        if (Changed)
          ++NumSimplify;
        if (Killed)
          continue;
      }
    }
->eraseFromParent();
1023 // If this is a simple instruction that we can value number, process it.
1024 if (SimpleValue::canHandle(Inst
)) {
1025 // See if the instruction has an available value. If so, use it.
1026 if (Value
*V
= AvailableValues
.lookup(Inst
)) {
1027 LLVM_DEBUG(dbgs() << "EarlyCSE CSE: " << *Inst
<< " to: " << *V
1029 if (!DebugCounter::shouldExecute(CSECounter
)) {
1030 LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
1033 if (auto *I
= dyn_cast
<Instruction
>(V
))
1034 I
->andIRFlags(Inst
);
1035 Inst
->replaceAllUsesWith(V
);
1037 Inst
->eraseFromParent();
1043 // Otherwise, just remember that this value is available.
1044 AvailableValues
.insert(Inst
, Inst
);
    ParseMemoryInst MemInst(Inst, TTI);
    // If this is a non-volatile load, process it.
    if (MemInst.isValid() && MemInst.isLoad()) {
      // (conservatively) we can't peek past the ordering implied by this
      // operation, but we can add this load to our set of available values.
      if (MemInst.isVolatile() || !MemInst.isUnordered()) {
        LastStore = nullptr;
        ++CurrentGeneration;
      }

      if (MemInst.isInvariantLoad()) {
        // If we pass an invariant load, we know that memory location is
        // indefinitely constant from the moment of first dereferenceability.
        // We conservatively treat the invariant_load as that moment. If we
        // pass an invariant load after already establishing a scope, don't
        // restart it since we want to preserve the earliest point seen.
        auto MemLoc = MemoryLocation::get(Inst);
        if (!AvailableInvariants.count(MemLoc))
          AvailableInvariants.insert(MemLoc, CurrentGeneration);
      }

      // If we have an available version of this load, and if it is the right
      // generation or the load is known to be from an invariant location,
      // replace this instruction.
      //
      // If either the dominating load or the current load are invariant, then
      // we can assume the current load loads the same value as the dominating
      // load.
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst != nullptr &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing loads with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          // We can't replace an atomic load with one which isn't also atomic.
          InVal.IsAtomic >= MemInst.isAtomic() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        Value *Op = getOrCreateResult(InVal.DefInst, Inst->getType());
        if (Op != nullptr) {
          LLVM_DEBUG(dbgs() << "EarlyCSE CSE LOAD: " << *Inst
                            << "  to: " << *InVal.DefInst << '\n');
          if (!DebugCounter::shouldExecute(CSECounter)) {
            LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            continue;
          }
          if (!Inst->use_empty())
            Inst->replaceAllUsesWith(Op);
          removeMSSA(Inst);
          Inst->eraseFromParent();
          Changed = true;
          ++NumCSELoad;
          continue;
        }
      }

      // Otherwise, remember that we have this instruction.
      AvailableLoads.insert(
          MemInst.getPointerOperand(),
          LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                    MemInst.isAtomic()));
      LastStore = nullptr;
      continue;
    }
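
    // Illustrative example (not part of the original source): with no
    // intervening may-write instruction the second load is fully redundant:
    //   %v1 = load i32, i32* %p
    //   %v2 = load i32, i32* %p    ; CSE'd to %v1 by the code above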
    // If this instruction may read from memory or throw (and potentially read
    // from memory in the exception handler), forget LastStore. Load/store
    // intrinsics will indicate both a read and a write to memory. The target
    // may override this (e.g. so that a store intrinsic does not read from
    // memory, and thus will be treated the same as a regular store for
    // commoning purposes).
    if ((Inst->mayReadFromMemory() || Inst->mayThrow()) &&
        !(MemInst.isValid() && !MemInst.mayReadFromMemory()))
      LastStore = nullptr;
    // If this is a read-only call, process it.
    if (CallValue::canHandle(Inst)) {
      // If we have an available version of this call, and if it is the right
      // generation, replace this instruction.
      std::pair<Instruction *, unsigned> InVal = AvailableCalls.lookup(Inst);
      if (InVal.first != nullptr &&
          isSameMemGeneration(InVal.second, CurrentGeneration, InVal.first,
                              Inst)) {
        LLVM_DEBUG(dbgs() << "EarlyCSE CSE CALL: " << *Inst
                          << "  to: " << *InVal.first << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        if (!Inst->use_empty())
          Inst->replaceAllUsesWith(InVal.first);
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumCSECall;
        continue;
      }

      // Otherwise, remember that we have this instruction.
      AvailableCalls.insert(
          Inst, std::pair<Instruction *, unsigned>(Inst, CurrentGeneration));
      continue;
    }
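
    // Illustrative example (not part of the original source): two calls to a
    // readonly, non-void function in the same memory generation are commoned:
    //   %r1 = call i32 @pure(i32 %x)
    //   %r2 = call i32 @pure(i32 %x)   ; CSE'd to %r1 by the code above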
    // A release fence requires that all stores complete before it, but does
    // not prevent the reordering of following loads 'before' the fence. As a
    // result, we don't need to consider it as writing to memory and don't need
    // to advance the generation. We do need to prevent DSE across the fence,
    // but that's handled above.
    if (FenceInst *FI = dyn_cast<FenceInst>(Inst))
      if (FI->getOrdering() == AtomicOrdering::Release) {
        assert(Inst->mayReadFromMemory() && "relied on to prevent DSE above");
        continue;
      }
    // write back DSE - If we write back the same value we just loaded from
    // the same location and haven't passed any intervening writes or ordering
    // operations, we can remove the write. The primary benefit is in allowing
    // the available load table to remain valid and value forward past where
    // the store originally was.
    if (MemInst.isValid() && MemInst.isStore()) {
      LoadValue InVal = AvailableLoads.lookup(MemInst.getPointerOperand());
      if (InVal.DefInst &&
          InVal.DefInst == getOrCreateResult(Inst, InVal.DefInst->getType()) &&
          InVal.MatchingId == MemInst.getMatchingId() &&
          // We don't yet handle removing stores with ordering of any kind.
          !MemInst.isVolatile() && MemInst.isUnordered() &&
          (isOperatingOnInvariantMemAt(Inst, InVal.Generation) ||
           isSameMemGeneration(InVal.Generation, CurrentGeneration,
                               InVal.DefInst, Inst))) {
        // It is okay to have a LastStore to a different pointer here if MemorySSA
        // tells us that the load and store are from the same memory generation.
        // In that case, LastStore should keep its present value since we're
        // removing the current store.
        assert((!LastStore ||
                ParseMemoryInst(LastStore, TTI).getPointerOperand() ==
                    MemInst.getPointerOperand() ||
                MSSA) &&
               "can't have an intervening store if not using MemorySSA!");
        LLVM_DEBUG(dbgs() << "EarlyCSE DSE (writeback): " << *Inst << '\n');
        if (!DebugCounter::shouldExecute(CSECounter)) {
          LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
          continue;
        }
        removeMSSA(Inst);
        Inst->eraseFromParent();
        Changed = true;
        ++NumDSE;
        // We can avoid incrementing the generation count since we were able
        // to eliminate this store.
        continue;
      }
    }
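
    // Illustrative example (not part of the original source) of the
    // write-back case handled above:
    //   %v = load i32, i32* %p
    //   store i32 %v, i32* %p    ; stores back the just-loaded value: removed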
    // Okay, this isn't something we can CSE at all. Check to see if it is
    // something that could modify memory. If so, our available memory values
    // cannot be used so bump the generation count.
    if (Inst->mayWriteToMemory()) {
      ++CurrentGeneration;

      if (MemInst.isValid() && MemInst.isStore()) {
        // We do a trivial form of DSE if there are two stores to the same
        // location with no intervening loads. Delete the earlier store.
        // At the moment, we don't remove ordered stores, but do remove
        // unordered atomic stores. There's no special requirement (for
        // unordered atomics) about removing atomic stores only in favor of
        // other atomic stores since we were going to execute the non-atomic
        // one anyway and the atomic one might never have become visible.
        if (LastStore) {
          ParseMemoryInst LastStoreMemInst(LastStore, TTI);
          assert(LastStoreMemInst.isUnordered() &&
                 !LastStoreMemInst.isVolatile() &&
                 "Violated invariant");
          if (LastStoreMemInst.isMatchingMemLoc(MemInst)) {
            LLVM_DEBUG(dbgs() << "EarlyCSE DEAD STORE: " << *LastStore
                              << "  due to: " << *Inst << '\n');
            if (!DebugCounter::shouldExecute(CSECounter)) {
              LLVM_DEBUG(dbgs() << "Skipping due to debug counter\n");
            } else {
              removeMSSA(LastStore);
              LastStore->eraseFromParent();
              Changed = true;
              ++NumDSE;
              LastStore = nullptr;
            }
          }
          // fallthrough - we can exploit information about this store
        }

        // Okay, we just invalidated anything we knew about loaded values. Try
        // to salvage *something* by remembering that the stored value is a live
        // version of the pointer. It is safe to forward from volatile stores
        // to non-volatile loads, so we don't have to check for volatility of
        // the store.
        AvailableLoads.insert(
            MemInst.getPointerOperand(),
            LoadValue(Inst, CurrentGeneration, MemInst.getMatchingId(),
                      MemInst.isAtomic()));

        // Remember that this was the last unordered store we saw for DSE. We
        // don't yet handle DSE on ordered or volatile stores since we don't
        // have a good way to model the ordering requirement for following
        // passes once the store is removed. We could insert a fence, but
        // since fences are slightly stronger than stores in their ordering,
        // it's not clear this is a profitable transform. Another option would
        // be to merge the ordering with that of the post dominating store.
        if (MemInst.isUnordered() && !MemInst.isVolatile())
          LastStore = Inst;
        else
          LastStore = nullptr;
      }
    }
  }

  return Changed;
}
bool EarlyCSE::run() {
  // Note, deque is being used here because there are significant performance
  // gains over vector when the container becomes very large due to the
  // specific access patterns. For more information see the mailing list
  // discussion on this:
  // http://lists.llvm.org/pipermail/llvm-commits/Week-of-Mon-20120116/135228.html
  std::deque<StackNode *> nodesToProcess;

  bool Changed = false;

  // Process the root node.
  nodesToProcess.push_back(new StackNode(
      AvailableValues, AvailableLoads, AvailableInvariants, AvailableCalls,
      CurrentGeneration, DT.getRootNode(),
      DT.getRootNode()->begin(), DT.getRootNode()->end()));

  assert(!CurrentGeneration && "Create a new EarlyCSE instance to rerun it.");

  // Process the stack.
  while (!nodesToProcess.empty()) {
    // Grab the first item off the stack. Set the current generation, remove
    // the node from the stack, and process it.
    StackNode *NodeToProcess = nodesToProcess.back();

    // Initialize class members.
    CurrentGeneration = NodeToProcess->currentGeneration();

    // Check if the node needs to be processed.
    if (!NodeToProcess->isProcessed()) {
      // Process the node.
      Changed |= processNode(NodeToProcess->node());
      NodeToProcess->childGeneration(CurrentGeneration);
      NodeToProcess->process();
    } else if (NodeToProcess->childIter() != NodeToProcess->end()) {
      // Push the next child onto the stack.
      DomTreeNode *child = NodeToProcess->nextChild();
      nodesToProcess.push_back(
          new StackNode(AvailableValues, AvailableLoads, AvailableInvariants,
                        AvailableCalls, NodeToProcess->childGeneration(),
                        child, child->begin(), child->end()));
    } else {
      // It has been processed, and there are no more children to process,
      // so delete it and pop it off the stack.
      delete NodeToProcess;
      nodesToProcess.pop_back();
    }
  } // while (!nodes...)

  return Changed;
}
PreservedAnalyses EarlyCSEPass::run(Function &F,
                                    FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &TTI = AM.getResult<TargetIRAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *MSSA =
      UseMemorySSA ? &AM.getResult<MemorySSAAnalysis>(F).getMSSA() : nullptr;

  EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

  if (!CSE.run())
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  if (UseMemorySSA)
    PA.preserve<MemorySSAAnalysis>();
  return PA;
}
namespace {

/// A simple and fast domtree-based CSE pass.
///
/// This pass does a simple depth-first walk over the dominator tree,
/// eliminating trivially redundant instructions and using instsimplify to
/// canonicalize things as it goes. It is intended to be fast and catch obvious
/// cases so that instcombine and other passes are more effective. It is
/// expected that a later pass of GVN will catch the interesting/hard cases.
template<bool UseMemorySSA>
class EarlyCSELegacyCommonPass : public FunctionPass {
public:
  static char ID;

  EarlyCSELegacyCommonPass() : FunctionPass(ID) {
    if (UseMemorySSA)
      initializeEarlyCSEMemSSALegacyPassPass(*PassRegistry::getPassRegistry());
    else
      initializeEarlyCSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    auto &TTI = getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
    auto *MSSA =
        UseMemorySSA ? &getAnalysis<MemorySSAWrapperPass>().getMSSA() : nullptr;

    EarlyCSE CSE(F.getParent()->getDataLayout(), TLI, TTI, DT, AC, MSSA);

    return CSE.run();
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    if (UseMemorySSA) {
      AU.addRequired<MemorySSAWrapperPass>();
      AU.addPreserved<MemorySSAWrapperPass>();
    }
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.setPreservesCFG();
  }
};

} // end anonymous namespace
using EarlyCSELegacyPass = EarlyCSELegacyCommonPass</*UseMemorySSA=*/false>;

template<>
char EarlyCSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(EarlyCSELegacyPass, "early-cse", "Early CSE", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(EarlyCSELegacyPass, "early-cse", "Early CSE", false, false)

using EarlyCSEMemSSALegacyPass =
    EarlyCSELegacyCommonPass</*UseMemorySSA=*/true>;

template<>
char EarlyCSEMemSSALegacyPass::ID = 0;

FunctionPass *llvm::createEarlyCSEPass(bool UseMemorySSA) {
  if (UseMemorySSA)
    return new EarlyCSEMemSSALegacyPass();
  else
    return new EarlyCSELegacyPass();
}

INITIALIZE_PASS_BEGIN(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                      "Early CSE w/ MemorySSA", false, false)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(EarlyCSEMemSSALegacyPass, "early-cse-memssa",
                    "Early CSE w/ MemorySSA", false, false)