//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The code below implements dead store elimination using MemorySSA. It uses
// the following general approach: given a MemoryDef, walk upwards to find
// clobbering MemoryDefs that may be killed by the starting def. Then check
// that there are no uses that may read the location of the original MemoryDef
// in between both MemoryDefs. A bit more concretely:
//
// For all MemoryDefs StartDef:
// 1. Get the next dominating clobbering MemoryDef (EarlierAccess) by walking
//    upwards.
// 2. Check that there are no reads between EarlierAccess and the StartDef by
//    checking all uses starting at EarlierAccess and walking until we see
//    StartDef.
// 3. For each found CurrentDef, check that:
//   1. There are no barrier instructions between CurrentDef and StartDef (like
//      throws or stores with ordering constraints).
//   2. StartDef is executed whenever CurrentDef is executed.
//   3. StartDef completely overwrites CurrentDef.
// 4. Erase CurrentDef from the function and MemorySSA.
//
//===----------------------------------------------------------------------===//
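// Illustrative sketch (this example is not part of the original file): with
// the approach above, the first store below is dead because the second store
// completely overwrites the same location and there is no intervening read:
//
//   store i32 1, i32* %p   ; CurrentDef - killed by the store below
//   store i32 2, i32* %p   ; StartDef (the killing MemoryDef)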
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/MemoryBuiltins.h"
45 #include "llvm/Analysis/MemoryLocation.h"
46 #include "llvm/Analysis/MemorySSA.h"
47 #include "llvm/Analysis/MemorySSAUpdater.h"
48 #include "llvm/Analysis/MustExecute.h"
49 #include "llvm/Analysis/PostDominators.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/Dominators.h"
58 #include "llvm/IR/Function.h"
59 #include "llvm/IR/IRBuilder.h"
60 #include "llvm/IR/InstIterator.h"
61 #include "llvm/IR/InstrTypes.h"
62 #include "llvm/IR/Instruction.h"
63 #include "llvm/IR/Instructions.h"
64 #include "llvm/IR/IntrinsicInst.h"
65 #include "llvm/IR/Intrinsics.h"
66 #include "llvm/IR/LLVMContext.h"
67 #include "llvm/IR/Module.h"
68 #include "llvm/IR/PassManager.h"
69 #include "llvm/IR/PatternMatch.h"
70 #include "llvm/IR/Value.h"
71 #include "llvm/InitializePasses.h"
72 #include "llvm/Pass.h"
73 #include "llvm/Support/Casting.h"
74 #include "llvm/Support/CommandLine.h"
75 #include "llvm/Support/Debug.h"
76 #include "llvm/Support/DebugCounter.h"
77 #include "llvm/Support/ErrorHandling.h"
78 #include "llvm/Support/MathExtras.h"
79 #include "llvm/Support/raw_ostream.h"
80 #include "llvm/Transforms/Scalar.h"
81 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
82 #include "llvm/Transforms/Utils/BuildLibCalls.h"
83 #include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "dse"
STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");
STATISTIC(NumCFGChecks, "Number of stores checked against the CFG");
STATISTIC(NumCFGTries, "Number of CFG-based elimination attempts");
STATISTIC(NumCFGSuccess, "Number of successful CFG-based eliminations");
STATISTIC(NumGetDomMemoryDefPassed,
          "Number of times a valid candidate is returned from getDomMemoryDef");
STATISTIC(NumDomMemDefChecks,
          "Number of iterations checking for reads in getDomMemoryDef");
DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
              "Controls which MemoryDefs are eliminated.");
static cl::opt<bool>
    EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
                                   cl::init(true), cl::Hidden,
                                   cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
    EnablePartialStoreMerging("enable-dse-partial-store-merging",
                              cl::init(true), cl::Hidden,
                              cl::desc("Enable partial store merging in DSE"));

static cl::opt<unsigned>
    MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
                       cl::desc("The number of memory instructions to scan for "
                                "dead store elimination (default = 150)"));

static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
    "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
    cl::desc("The maximum number of steps while walking upwards to find "
             "MemoryDefs that may be killed (default = 90)"));

static cl::opt<unsigned> MemorySSAPartialStoreLimit(
    "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
    cl::desc("The maximum number of candidates that only partially overwrite "
             "the killing MemoryDef to consider (default = 5)"));

static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
    "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
    cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
             "other stores per basic block (default = 5000)"));

static cl::opt<unsigned> MemorySSASameBBStepCost(
    "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
    cl::desc("The cost of a step in the same basic block as the killing "
             "MemoryDef (default = 1)"));

static cl::opt<unsigned>
    MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
                             cl::Hidden,
                             cl::desc("The cost of a step in a different basic "
                                      "block than the killing MemoryDef "
                                      "(default = 5)"));

static cl::opt<unsigned> MemorySSAPathCheckLimit(
    "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
    cl::desc("The maximum number of blocks to check when trying to prove that "
             "all paths to an exit go through a killing block (default = 50)"));
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
/// Does this instruction write some memory? This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_inline:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
    case Intrinsic::masked_store:
      return true;
    }
  }
  if (auto *CB = dyn_cast<CallBase>(I)) {
    LibFunc LF;
    if (TLI.getLibFunc(*CB, LF) && TLI.has(LF)) {
      switch (LF) {
      case LibFunc_strcpy:
      case LibFunc_strncpy:
      case LibFunc_strcat:
      case LibFunc_strncat:
        return true;
      default:
        return false;
      }
    }
  }
  return false;
}
208 /// Return a Location stored to by the specified instruction. If isRemovable
209 /// returns true, this function and getLocForRead completely describe the memory
210 /// operations for this instruction.
211 static MemoryLocation
getLocForWrite(Instruction
*Inst
,
212 const TargetLibraryInfo
&TLI
) {
213 if (StoreInst
*SI
= dyn_cast
<StoreInst
>(Inst
))
214 return MemoryLocation::get(SI
);
216 // memcpy/memmove/memset.
217 if (auto *MI
= dyn_cast
<AnyMemIntrinsic
>(Inst
))
218 return MemoryLocation::getForDest(MI
);
220 if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(Inst
)) {
221 switch (II
->getIntrinsicID()) {
223 return MemoryLocation(); // Unhandled intrinsic.
224 case Intrinsic::init_trampoline
:
225 return MemoryLocation::getAfter(II
->getArgOperand(0));
226 case Intrinsic::masked_store
:
227 return MemoryLocation::getForArgument(II
, 1, TLI
);
228 case Intrinsic::lifetime_end
: {
229 uint64_t Len
= cast
<ConstantInt
>(II
->getArgOperand(0))->getZExtValue();
230 return MemoryLocation(II
->getArgOperand(1), Len
);
234 if (auto *CB
= dyn_cast
<CallBase
>(Inst
))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
237 return MemoryLocation::getAfter(CB
->getArgOperand(0));
238 return MemoryLocation();
241 /// If the value of this instruction and the memory it writes to is unused, may
242 /// we delete this instruction?
243 static bool isRemovable(Instruction
*I
) {
244 // Don't remove volatile/atomic stores.
245 if (StoreInst
*SI
= dyn_cast
<StoreInst
>(I
))
246 return SI
->isUnordered();
248 if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
)) {
249 switch (II
->getIntrinsicID()) {
250 default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
251 case Intrinsic::lifetime_end
:
    // Never remove dead lifetime_end's, e.g. because it is followed by a
    // free.
255 case Intrinsic::init_trampoline
:
256 // Always safe to remove init_trampoline.
258 case Intrinsic::memset
:
259 case Intrinsic::memmove
:
260 case Intrinsic::memcpy
:
261 case Intrinsic::memcpy_inline
:
262 // Don't remove volatile memory intrinsics.
263 return !cast
<MemIntrinsic
>(II
)->isVolatile();
264 case Intrinsic::memcpy_element_unordered_atomic
:
265 case Intrinsic::memmove_element_unordered_atomic
:
266 case Intrinsic::memset_element_unordered_atomic
:
267 case Intrinsic::masked_store
:
272 // note: only get here for calls with analyzable writes - i.e. libcalls
273 if (auto *CB
= dyn_cast
<CallBase
>(I
))
274 return CB
->use_empty();
/// Returns true if the end of this instruction can be safely shortened in
/// length.
281 static bool isShortenableAtTheEnd(Instruction
*I
) {
282 // Don't shorten stores for now
283 if (isa
<StoreInst
>(I
))
286 if (IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
)) {
287 switch (II
->getIntrinsicID()) {
288 default: return false;
289 case Intrinsic::memset
:
290 case Intrinsic::memcpy
:
291 case Intrinsic::memcpy_element_unordered_atomic
:
292 case Intrinsic::memset_element_unordered_atomic
:
293 // Do shorten memory intrinsics.
294 // FIXME: Add memmove if it's also safe to transform.
  // Don't shorten libcalls for now.

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
306 static bool isShortenableAtTheBeginning(Instruction
*I
) {
307 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
308 // easily done by offsetting the source address.
309 return isa
<AnyMemSetInst
>(I
);
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}
namespace {
enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_MaybePartial,
  OW_Unknown
};
} // end anonymous namespace
/// Check if two instructions are masked stores that completely
338 /// overwrite one another. More specifically, \p Later has to
339 /// overwrite \p Earlier.
340 static OverwriteResult
isMaskedStoreOverwrite(const Instruction
*Later
,
341 const Instruction
*Earlier
,
342 BatchAAResults
&AA
) {
343 const auto *IIL
= dyn_cast
<IntrinsicInst
>(Later
);
344 const auto *IIE
= dyn_cast
<IntrinsicInst
>(Earlier
);
345 if (IIL
== nullptr || IIE
== nullptr)
347 if (IIL
->getIntrinsicID() != Intrinsic::masked_store
||
348 IIE
->getIntrinsicID() != Intrinsic::masked_store
)
351 Value
*LP
= IIL
->getArgOperand(1)->stripPointerCasts();
352 Value
*EP
= IIE
->getArgOperand(1)->stripPointerCasts();
353 if (LP
!= EP
&& !AA
.isMustAlias(LP
, EP
))
356 // TODO: check that Later's mask is a superset of the Earlier's mask.
357 if (IIL
->getArgOperand(3) != IIE
->getArgOperand(3))
/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
/// NOTE: This function must only be called if both \p Later and \p Earlier
/// write to the same underlying object with valid \p EarlierOff and
/// \p LaterOff.
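///
/// For illustration only (offsets invented for this comment): if the earlier
/// store covers bytes [0, 8) of an object, then a later store to [4, 12)
/// overwrites its end, a later store to [-4, 4) overwrites its beginning
/// (both only reported when partial-overwrite tracking is disabled), and a
/// later store to [2, 6) lies fully inside the earlier store and is a
/// candidate for 'OW_PartialEarlierWithFullLater' (partial store merging).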
372 static OverwriteResult
isPartialOverwrite(const MemoryLocation
&Later
,
373 const MemoryLocation
&Earlier
,
374 int64_t EarlierOff
, int64_t LaterOff
,
375 Instruction
*DepWrite
,
376 InstOverlapIntervalsTy
&IOL
) {
377 const uint64_t LaterSize
= Later
.Size
.getValue();
378 const uint64_t EarlierSize
= Earlier
.Size
.getValue();
  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // later write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called providing DepWrite when there are any intervening reads.
384 if (EnablePartialOverwriteTracking
&&
385 LaterOff
< int64_t(EarlierOff
+ EarlierSize
) &&
386 int64_t(LaterOff
+ LaterSize
) >= EarlierOff
) {
388 // Insert our part of the overlap into the map.
389 auto &IM
= IOL
[DepWrite
];
390 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
391 << ", " << int64_t(EarlierOff
+ EarlierSize
)
392 << ") Later [" << LaterOff
<< ", "
393 << int64_t(LaterOff
+ LaterSize
) << ")\n");
    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
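    //
    // A small worked example (offsets invented for this comment): for an
    // earlier store covering [0, 8), a later store to [0, 4) is recorded as
    // {4 -> 0}. A subsequent later store to [4, 8) merges with that entry
    // into {8 -> 0}; the combined interval [0, 8) now covers the earlier
    // store, so the check below reports a full overwrite from partials.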
399 int64_t LaterIntStart
= LaterOff
, LaterIntEnd
= LaterOff
+ LaterSize
;
401 // Find any intervals ending at, or after, LaterIntStart which start
402 // before LaterIntEnd.
403 auto ILI
= IM
.lower_bound(LaterIntStart
);
404 if (ILI
!= IM
.end() && ILI
->second
<= LaterIntEnd
) {
405 // This existing interval is overlapped with the current store somewhere
406 // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
407 // intervals and adjusting our start and end.
408 LaterIntStart
= std::min(LaterIntStart
, ILI
->second
);
409 LaterIntEnd
= std::max(LaterIntEnd
, ILI
->first
);
412 // Continue erasing and adjusting our end in case other previous
413 // intervals are also overlapped with the current store.
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
418 while (ILI
!= IM
.end() && ILI
->second
<= LaterIntEnd
) {
419 assert(ILI
->second
> LaterIntStart
&& "Unexpected interval");
420 LaterIntEnd
= std::max(LaterIntEnd
, ILI
->first
);
425 IM
[LaterIntEnd
] = LaterIntStart
;
428 if (ILI
->second
<= EarlierOff
&&
429 ILI
->first
>= int64_t(EarlierOff
+ EarlierSize
)) {
430 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
431 << EarlierOff
<< ", "
432 << int64_t(EarlierOff
+ EarlierSize
)
433 << ") Composite Later [" << ILI
->second
<< ", "
434 << ILI
->first
<< ")\n");
435 ++NumCompletePartials
;
440 // Check for an earlier store which writes to all the memory locations that
441 // the later store writes to.
442 if (EnablePartialStoreMerging
&& LaterOff
>= EarlierOff
&&
443 int64_t(EarlierOff
+ EarlierSize
) > LaterOff
&&
444 uint64_t(LaterOff
- EarlierOff
) + LaterSize
<= EarlierSize
) {
445 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
446 << EarlierOff
<< ", "
447 << int64_t(EarlierOff
+ EarlierSize
)
448 << ") by a later store [" << LaterOff
<< ", "
449 << int64_t(LaterOff
+ LaterSize
) << ")\n");
450 // TODO: Maybe come up with a better name?
451 return OW_PartialEarlierWithFullLater
;
454 // Another interesting case is if the later store overwrites the end of the
460 // In this case we may want to trim the size of earlier to avoid generating
461 // writes to addresses which will definitely be overwritten later
462 if (!EnablePartialOverwriteTracking
&&
463 (LaterOff
> EarlierOff
&& LaterOff
< int64_t(EarlierOff
+ EarlierSize
) &&
464 int64_t(LaterOff
+ LaterSize
) >= int64_t(EarlierOff
+ EarlierSize
)))
467 // Finally, we also need to check if the later store overwrites the beginning
468 // of the earlier store.
473 // In this case we may want to move the destination address and trim the size
474 // of earlier to avoid generating writes to addresses which will definitely
475 // be overwritten later.
476 if (!EnablePartialOverwriteTracking
&&
477 (LaterOff
<= EarlierOff
&& int64_t(LaterOff
+ LaterSize
) > EarlierOff
)) {
478 assert(int64_t(LaterOff
+ LaterSize
) < int64_t(EarlierOff
+ EarlierSize
) &&
479 "Expect to be handled as OW_Complete");
482 // Otherwise, they don't completely overlap.
486 /// Returns true if the memory which is accessed by the second instruction is not
487 /// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
491 memoryIsNotModifiedBetween(Instruction
*FirstI
, Instruction
*SecondI
,
492 BatchAAResults
&AA
, const DataLayout
&DL
,
494 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
495 // instructions which can modify the memory location accessed by SecondI.
497 // While doing the walk keep track of the address to check. It might be
498 // different in different basic blocks due to PHI translation.
499 using BlockAddressPair
= std::pair
<BasicBlock
*, PHITransAddr
>;
500 SmallVector
<BlockAddressPair
, 16> WorkList
;
501 // Keep track of the address we visited each block with. Bail out if we
502 // visit a block with different addresses.
503 DenseMap
<BasicBlock
*, Value
*> Visited
;
505 BasicBlock::iterator
FirstBBI(FirstI
);
507 BasicBlock::iterator
SecondBBI(SecondI
);
508 BasicBlock
*FirstBB
= FirstI
->getParent();
509 BasicBlock
*SecondBB
= SecondI
->getParent();
510 MemoryLocation MemLoc
;
511 if (auto *MemSet
= dyn_cast
<MemSetInst
>(SecondI
))
512 MemLoc
= MemoryLocation::getForDest(MemSet
);
514 MemLoc
= MemoryLocation::get(SecondI
);
516 auto *MemLocPtr
= const_cast<Value
*>(MemLoc
.Ptr
);
518 // Start checking the SecondBB.
520 std::make_pair(SecondBB
, PHITransAddr(MemLocPtr
, DL
, nullptr)));
521 bool isFirstBlock
= true;
523 // Check all blocks going backward until we reach the FirstBB.
524 while (!WorkList
.empty()) {
525 BlockAddressPair Current
= WorkList
.pop_back_val();
526 BasicBlock
*B
= Current
.first
;
527 PHITransAddr
&Addr
= Current
.second
;
528 Value
*Ptr
= Addr
.getAddr();
530 // Ignore instructions before FirstI if this is the FirstBB.
531 BasicBlock::iterator BI
= (B
== FirstBB
? FirstBBI
: B
->begin());
533 BasicBlock::iterator EI
;
535 // Ignore instructions after SecondI if this is the first visit of SecondBB.
536 assert(B
== SecondBB
&& "first block is not the store block");
538 isFirstBlock
= false;
540 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
541 // In this case we also have to look at instructions after SecondI.
544 for (; BI
!= EI
; ++BI
) {
545 Instruction
*I
= &*BI
;
546 if (I
->mayWriteToMemory() && I
!= SecondI
)
547 if (isModSet(AA
.getModRefInfo(I
, MemLoc
.getWithNewPtr(Ptr
))))
551 assert(B
!= &FirstBB
->getParent()->getEntryBlock() &&
552 "Should not hit the entry block because SI must be dominated by LI");
553 for (BasicBlock
*Pred
: predecessors(B
)) {
554 PHITransAddr PredAddr
= Addr
;
555 if (PredAddr
.NeedsPHITranslationFromBlock(B
)) {
556 if (!PredAddr
.IsPotentiallyPHITranslatable())
558 if (PredAddr
.PHITranslateValue(B
, Pred
, DT
, false))
561 Value
*TranslatedPtr
= PredAddr
.getAddr();
562 auto Inserted
= Visited
.insert(std::make_pair(Pred
, TranslatedPtr
));
563 if (!Inserted
.second
) {
564 // We already visited this block before. If it was with a different
565 // address - bail out!
566 if (TranslatedPtr
!= Inserted
.first
->second
)
568 // ... otherwise just skip it.
571 WorkList
.push_back(std::make_pair(Pred
, PredAddr
));
578 static bool tryToShorten(Instruction
*EarlierWrite
, int64_t &EarlierStart
,
579 uint64_t &EarlierSize
, int64_t LaterStart
,
580 uint64_t LaterSize
, bool IsOverwriteEnd
) {
581 auto *EarlierIntrinsic
= cast
<AnyMemIntrinsic
>(EarlierWrite
);
582 Align PrefAlign
= EarlierIntrinsic
->getDestAlign().valueOrOne();
  // We assume that memset/memcpy operates in chunks of the "largest" native
  // type size and aligned on the same value. That means optimal start and size
  // of memset/memcpy should be modulo of preferred alignment of that type. That
  // is, there is no sense in trying to reduce the store size any further,
  // since any "extra" stores come for free anyway.
  // On the other hand, the maximum alignment we can achieve is limited by the
  // alignment of the original store.
  //
  // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
  // "largest" native type.
  // Note: What is the proper way to get that value?
  // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
  // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
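  //
  // A worked example (numbers invented for this comment): for an earlier
  // memset covering [0, 32) with a preferred destination alignment of 8 that
  // is overwritten at its end starting at offset 20, offsetToAlignment(20, 8)
  // is 4, so ToRemoveStart becomes 24 and ToRemoveSize becomes 8, trimming
  // the memset to 24 bytes while preserving its start and alignment.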
598 int64_t ToRemoveStart
= 0;
599 uint64_t ToRemoveSize
= 0;
600 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
601 // maintained on the remaining store.
602 if (IsOverwriteEnd
) {
    // Calculate the required adjustment for 'LaterStart' in order to keep the
    // remaining store size aligned on 'PrefAlign'.
606 offsetToAlignment(uint64_t(LaterStart
- EarlierStart
), PrefAlign
);
607 ToRemoveStart
= LaterStart
+ Off
;
608 if (EarlierSize
<= uint64_t(ToRemoveStart
- EarlierStart
))
610 ToRemoveSize
= EarlierSize
- uint64_t(ToRemoveStart
- EarlierStart
);
612 ToRemoveStart
= EarlierStart
;
613 assert(LaterSize
>= uint64_t(EarlierStart
- LaterStart
) &&
614 "Not overlapping accesses?");
615 ToRemoveSize
= LaterSize
- uint64_t(EarlierStart
- LaterStart
);
    // Calculate the required adjustment for 'ToRemoveSize' in order to keep
    // the start of the remaining store aligned on 'PrefAlign'.
618 uint64_t Off
= offsetToAlignment(ToRemoveSize
, PrefAlign
);
620 if (ToRemoveSize
<= (PrefAlign
.value() - Off
))
622 ToRemoveSize
-= PrefAlign
.value() - Off
;
624 assert(isAligned(PrefAlign
, ToRemoveSize
) &&
625 "Should preserve selected alignment");
628 assert(ToRemoveSize
> 0 && "Shouldn't reach here if nothing to remove");
629 assert(EarlierSize
> ToRemoveSize
&& "Can't remove more than original size");
631 uint64_t NewSize
= EarlierSize
- ToRemoveSize
;
632 if (auto *AMI
= dyn_cast
<AtomicMemIntrinsic
>(EarlierWrite
)) {
633 // When shortening an atomic memory intrinsic, the newly shortened
634 // length must remain an integer multiple of the element size.
635 const uint32_t ElementSize
= AMI
->getElementSizeInBytes();
636 if (0 != NewSize
% ElementSize
)
640 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
641 << (IsOverwriteEnd
? "END" : "BEGIN") << ": "
642 << *EarlierWrite
<< "\n KILLER [" << ToRemoveStart
<< ", "
643 << int64_t(ToRemoveStart
+ ToRemoveSize
) << ")\n");
645 Value
*EarlierWriteLength
= EarlierIntrinsic
->getLength();
646 Value
*TrimmedLength
=
647 ConstantInt::get(EarlierWriteLength
->getType(), NewSize
);
648 EarlierIntrinsic
->setLength(TrimmedLength
);
649 EarlierIntrinsic
->setDestAlignment(PrefAlign
);
651 if (!IsOverwriteEnd
) {
652 Value
*OrigDest
= EarlierIntrinsic
->getRawDest();
654 Type::getInt8PtrTy(EarlierIntrinsic
->getContext(),
655 OrigDest
->getType()->getPointerAddressSpace());
656 Value
*Dest
= OrigDest
;
657 if (OrigDest
->getType() != Int8PtrTy
)
658 Dest
= CastInst::CreatePointerCast(OrigDest
, Int8PtrTy
, "", EarlierWrite
);
659 Value
*Indices
[1] = {
660 ConstantInt::get(EarlierWriteLength
->getType(), ToRemoveSize
)};
661 Instruction
*NewDestGEP
= GetElementPtrInst::CreateInBounds(
662 Type::getInt8Ty(EarlierIntrinsic
->getContext()),
663 Dest
, Indices
, "", EarlierWrite
);
664 NewDestGEP
->setDebugLoc(EarlierIntrinsic
->getDebugLoc());
665 if (NewDestGEP
->getType() != OrigDest
->getType())
666 NewDestGEP
= CastInst::CreatePointerCast(NewDestGEP
, OrigDest
->getType(),
668 EarlierIntrinsic
->setDest(NewDestGEP
);
671 // Finally update start and size of earlier access.
673 EarlierStart
+= ToRemoveSize
;
674 EarlierSize
= NewSize
;
679 static bool tryToShortenEnd(Instruction
*EarlierWrite
,
680 OverlapIntervalsTy
&IntervalMap
,
681 int64_t &EarlierStart
, uint64_t &EarlierSize
) {
682 if (IntervalMap
.empty() || !isShortenableAtTheEnd(EarlierWrite
))
685 OverlapIntervalsTy::iterator OII
= --IntervalMap
.end();
686 int64_t LaterStart
= OII
->second
;
687 uint64_t LaterSize
= OII
->first
- LaterStart
;
689 assert(OII
->first
- LaterStart
>= 0 && "Size expected to be positive");
691 if (LaterStart
> EarlierStart
&&
692 // Note: "LaterStart - EarlierStart" is known to be positive due to
694 (uint64_t)(LaterStart
- EarlierStart
) < EarlierSize
&&
695 // Note: "EarlierSize - (uint64_t)(LaterStart - EarlierStart)" is known to
696 // be non negative due to preceding checks.
697 LaterSize
>= EarlierSize
- (uint64_t)(LaterStart
- EarlierStart
)) {
698 if (tryToShorten(EarlierWrite
, EarlierStart
, EarlierSize
, LaterStart
,
700 IntervalMap
.erase(OII
);
707 static bool tryToShortenBegin(Instruction
*EarlierWrite
,
708 OverlapIntervalsTy
&IntervalMap
,
709 int64_t &EarlierStart
, uint64_t &EarlierSize
) {
710 if (IntervalMap
.empty() || !isShortenableAtTheBeginning(EarlierWrite
))
713 OverlapIntervalsTy::iterator OII
= IntervalMap
.begin();
714 int64_t LaterStart
= OII
->second
;
715 uint64_t LaterSize
= OII
->first
- LaterStart
;
717 assert(OII
->first
- LaterStart
>= 0 && "Size expected to be positive");
719 if (LaterStart
<= EarlierStart
&&
720 // Note: "EarlierStart - LaterStart" is known to be non negative due to
722 LaterSize
> (uint64_t)(EarlierStart
- LaterStart
)) {
723 // Note: "LaterSize - (uint64_t)(EarlierStart - LaterStart)" is known to be
724 // positive due to preceding checks.
725 assert(LaterSize
- (uint64_t)(EarlierStart
- LaterStart
) < EarlierSize
&&
726 "Should have been handled as OW_Complete");
727 if (tryToShorten(EarlierWrite
, EarlierStart
, EarlierSize
, LaterStart
,
729 IntervalMap
.erase(OII
);
736 static bool removePartiallyOverlappedStores(const DataLayout
&DL
,
737 InstOverlapIntervalsTy
&IOL
,
738 const TargetLibraryInfo
&TLI
) {
739 bool Changed
= false;
740 for (auto OI
: IOL
) {
741 Instruction
*EarlierWrite
= OI
.first
;
742 MemoryLocation Loc
= getLocForWrite(EarlierWrite
, TLI
);
743 assert(isRemovable(EarlierWrite
) && "Expect only removable instruction");
745 const Value
*Ptr
= Loc
.Ptr
->stripPointerCasts();
746 int64_t EarlierStart
= 0;
747 uint64_t EarlierSize
= Loc
.Size
.getValue();
748 GetPointerBaseWithConstantOffset(Ptr
, EarlierStart
, DL
);
749 OverlapIntervalsTy
&IntervalMap
= OI
.second
;
751 tryToShortenEnd(EarlierWrite
, IntervalMap
, EarlierStart
, EarlierSize
);
752 if (IntervalMap
.empty())
755 tryToShortenBegin(EarlierWrite
, IntervalMap
, EarlierStart
, EarlierSize
);
760 static Constant
*tryToMergePartialOverlappingStores(
761 StoreInst
*Earlier
, StoreInst
*Later
, int64_t InstWriteOffset
,
762 int64_t DepWriteOffset
, const DataLayout
&DL
, BatchAAResults
&AA
,
765 if (Earlier
&& isa
<ConstantInt
>(Earlier
->getValueOperand()) &&
766 DL
.typeSizeEqualsStoreSize(Earlier
->getValueOperand()->getType()) &&
767 Later
&& isa
<ConstantInt
>(Later
->getValueOperand()) &&
768 DL
.typeSizeEqualsStoreSize(Later
->getValueOperand()->getType()) &&
769 memoryIsNotModifiedBetween(Earlier
, Later
, AA
, DL
, DT
)) {
770 // If the store we find is:
771 // a) partially overwritten by the store to 'Loc'
772 // b) the later store is fully contained in the earlier one and
773 // c) they both have a constant value
774 // d) none of the two stores need padding
775 // Merge the two stores, replacing the earlier store's value with a
776 // merge of both values.
777 // TODO: Deal with other constant types (vectors, etc), and probably
778 // some mem intrinsics (if needed)
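    //
    // Sketch with invented values: merging an earlier i32 store of 0x11223344
    // with a later i8 store of 0xFF at byte offset 1 gives, on a little-endian
    // target, LShiftAmount = 8, a mask over bits [8, 16), and a merged
    // constant of 0x1122FF44.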
781 cast
<ConstantInt
>(Earlier
->getValueOperand())->getValue();
782 APInt LaterValue
= cast
<ConstantInt
>(Later
->getValueOperand())->getValue();
783 unsigned LaterBits
= LaterValue
.getBitWidth();
784 assert(EarlierValue
.getBitWidth() > LaterValue
.getBitWidth());
785 LaterValue
= LaterValue
.zext(EarlierValue
.getBitWidth());
787 // Offset of the smaller store inside the larger store
788 unsigned BitOffsetDiff
= (InstWriteOffset
- DepWriteOffset
) * 8;
789 unsigned LShiftAmount
= DL
.isBigEndian() ? EarlierValue
.getBitWidth() -
790 BitOffsetDiff
- LaterBits
792 APInt Mask
= APInt::getBitsSet(EarlierValue
.getBitWidth(), LShiftAmount
,
793 LShiftAmount
+ LaterBits
);
794 // Clear the bits we'll be replacing, then OR with the smaller
795 // store, shifted appropriately.
796 APInt Merged
= (EarlierValue
& ~Mask
) | (LaterValue
<< LShiftAmount
);
797 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *Earlier
798 << "\n Later: " << *Later
799 << "\n Merged Value: " << Merged
<< '\n');
800 return ConstantInt::get(Earlier
->getValueOperand()->getType(), Merged
);
// Returns true if \p I is an intrinsic that does not read or write memory.
807 bool isNoopIntrinsic(Instruction
*I
) {
808 if (const IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
)) {
809 switch (II
->getIntrinsicID()) {
810 case Intrinsic::lifetime_start
:
811 case Intrinsic::lifetime_end
:
812 case Intrinsic::invariant_end
:
813 case Intrinsic::launder_invariant_group
:
814 case Intrinsic::assume
:
816 case Intrinsic::dbg_addr
:
817 case Intrinsic::dbg_declare
:
818 case Intrinsic::dbg_label
:
819 case Intrinsic::dbg_value
:
820 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
828 // Check if we can ignore \p D for DSE.
829 bool canSkipDef(MemoryDef
*D
, bool DefVisibleToCaller
,
830 const TargetLibraryInfo
&TLI
) {
831 Instruction
*DI
= D
->getMemoryInst();
832 // Calls that only access inaccessible memory cannot read or write any memory
833 // locations we consider for elimination.
834 if (auto *CB
= dyn_cast
<CallBase
>(DI
))
835 if (CB
->onlyAccessesInaccessibleMemory()) {
836 if (isAllocLikeFn(DI
, &TLI
))
840 // We can eliminate stores to locations not visible to the caller across
841 // throwing instructions.
842 if (DI
->mayThrow() && !DefVisibleToCaller
)
  // We can remove the dead stores, irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of
  // already visible stores; they do not make a store visible to other
  // threads. So, skipping over a fence does not change a store from being
  // dead.
850 if (isa
<FenceInst
>(DI
))
853 // Skip intrinsics that do not really read or modify memory.
854 if (isNoopIntrinsic(DI
))
  /// The single BatchAA instance that is used to cache AA queries. It will
  /// not be invalidated over the whole run. This is safe, because:
  /// 1. Only memory writes are removed, so the alias cache for memory
  ///    locations remains valid.
  /// 2. No new instructions are added (only instructions removed), so cached
  ///    information for a deleted value cannot be accessed by a re-used new
  ///    value pointer.
  BatchAAResults BatchAA;
875 PostDominatorTree
&PDT
;
876 const TargetLibraryInfo
&TLI
;
877 const DataLayout
&DL
;
880 // Whether the function contains any irreducible control flow, useful for
  // being able to accurately detect loops.
882 bool ContainsIrreducibleLoops
;
884 // All MemoryDefs that potentially could kill other MemDefs.
885 SmallVector
<MemoryDef
*, 64> MemDefs
;
886 // Any that should be skipped as they are already deleted
887 SmallPtrSet
<MemoryAccess
*, 4> SkipStores
;
888 // Keep track of all of the objects that are invisible to the caller before
889 // the function returns.
890 // SmallPtrSet<const Value *, 16> InvisibleToCallerBeforeRet;
891 DenseMap
<const Value
*, bool> InvisibleToCallerBeforeRet
;
892 // Keep track of all of the objects that are invisible to the caller after
893 // the function returns.
894 DenseMap
<const Value
*, bool> InvisibleToCallerAfterRet
;
895 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
896 SmallPtrSet
<BasicBlock
*, 16> ThrowingBlocks
;
  // Post-order numbers for each basic block. Used to figure out if one memory
  // access is executed before another.
899 DenseMap
<BasicBlock
*, unsigned> PostOrderNumbers
;
  /// Keep track of instructions (partly) overlapping with killing MemoryDefs
  /// per basic block.
903 DenseMap
<BasicBlock
*, InstOverlapIntervalsTy
> IOLs
;
905 DSEState(Function
&F
, AliasAnalysis
&AA
, MemorySSA
&MSSA
, DominatorTree
&DT
,
906 PostDominatorTree
&PDT
, const TargetLibraryInfo
&TLI
,
908 : F(F
), AA(AA
), BatchAA(AA
), MSSA(MSSA
), DT(DT
), PDT(PDT
), TLI(TLI
),
909 DL(F
.getParent()->getDataLayout()), LI(LI
) {}
911 static DSEState
get(Function
&F
, AliasAnalysis
&AA
, MemorySSA
&MSSA
,
912 DominatorTree
&DT
, PostDominatorTree
&PDT
,
913 const TargetLibraryInfo
&TLI
, const LoopInfo
&LI
) {
914 DSEState
State(F
, AA
, MSSA
, DT
, PDT
, TLI
, LI
);
915 // Collect blocks with throwing instructions not modeled in MemorySSA and
916 // alloc-like objects.
918 for (BasicBlock
*BB
: post_order(&F
)) {
919 State
.PostOrderNumbers
[BB
] = PO
++;
920 for (Instruction
&I
: *BB
) {
921 MemoryAccess
*MA
= MSSA
.getMemoryAccess(&I
);
922 if (I
.mayThrow() && !MA
)
923 State
.ThrowingBlocks
.insert(I
.getParent());
925 auto *MD
= dyn_cast_or_null
<MemoryDef
>(MA
);
926 if (MD
&& State
.MemDefs
.size() < MemorySSADefsPerBlockLimit
&&
927 (State
.getLocForWriteEx(&I
) || State
.isMemTerminatorInst(&I
)))
928 State
.MemDefs
.push_back(MD
);
932 // Treat byval or inalloca arguments the same as Allocas, stores to them are
933 // dead at the end of the function.
934 for (Argument
&AI
: F
.args())
935 if (AI
.hasPassPointeeByValueCopyAttr()) {
936 // For byval, the caller doesn't know the address of the allocation.
937 if (AI
.hasByValAttr())
938 State
.InvisibleToCallerBeforeRet
.insert({&AI
, true});
939 State
.InvisibleToCallerAfterRet
.insert({&AI
, true});
942 // Collect whether there is any irreducible control flow in the function.
943 State
.ContainsIrreducibleLoops
= mayContainIrreducibleControl(F
, &LI
);
948 /// Return 'OW_Complete' if a store to the 'Later' location (by \p LaterI
949 /// instruction) completely overwrites a store to the 'Earlier' location.
950 /// (by \p EarlierI instruction).
951 /// Return OW_MaybePartial if \p Later does not completely overwrite
952 /// \p Earlier, but they both write to the same underlying object. In that
953 /// case, use isPartialOverwrite to check if \p Later partially overwrites
954 /// \p Earlier. Returns 'OW_Unknown' if nothing can be determined.
956 isOverwrite(const Instruction
*LaterI
, const Instruction
*EarlierI
,
957 const MemoryLocation
&Later
, const MemoryLocation
&Earlier
,
958 int64_t &EarlierOff
, int64_t &LaterOff
) {
    // AliasAnalysis does not always account for loops. Limit overwrite checks
    // to dependencies for which we can guarantee they are independent of any
    // loops they are in.
962 if (!isGuaranteedLoopIndependent(EarlierI
, LaterI
, Earlier
))
965 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
966 // get imprecise values here, though (except for unknown sizes).
967 if (!Later
.Size
.isPrecise() || !Earlier
.Size
.isPrecise()) {
      // In case no constant size is known, try to use the IR values for the
      // number of bytes written and check if they match.
970 const auto *LaterMemI
= dyn_cast
<MemIntrinsic
>(LaterI
);
971 const auto *EarlierMemI
= dyn_cast
<MemIntrinsic
>(EarlierI
);
972 if (LaterMemI
&& EarlierMemI
) {
973 const Value
*LaterV
= LaterMemI
->getLength();
974 const Value
*EarlierV
= EarlierMemI
->getLength();
975 if (LaterV
== EarlierV
&& BatchAA
.isMustAlias(Earlier
, Later
))
      // Masked stores have imprecise locations, but we can reason about them
      // to some extent.
981 return isMaskedStoreOverwrite(LaterI
, EarlierI
, BatchAA
);
984 const uint64_t LaterSize
= Later
.Size
.getValue();
985 const uint64_t EarlierSize
= Earlier
.Size
.getValue();
987 // Query the alias information
988 AliasResult AAR
= BatchAA
.alias(Later
, Earlier
);
990 // If the start pointers are the same, we just have to compare sizes to see if
991 // the later store was larger than the earlier store.
992 if (AAR
== AliasResult::MustAlias
) {
993 // Make sure that the Later size is >= the Earlier size.
994 if (LaterSize
>= EarlierSize
)
998 // If we hit a partial alias we may have a full overwrite
999 if (AAR
== AliasResult::PartialAlias
&& AAR
.hasOffset()) {
1000 int32_t Off
= AAR
.getOffset();
1001 if (Off
>= 0 && (uint64_t)Off
+ EarlierSize
<= LaterSize
)
1005 // Check to see if the later store is to the entire object (either a global,
1006 // an alloca, or a byval/inalloca argument). If so, then it clearly
1007 // overwrites any other store to the same object.
1008 const Value
*P1
= Earlier
.Ptr
->stripPointerCasts();
1009 const Value
*P2
= Later
.Ptr
->stripPointerCasts();
1010 const Value
*UO1
= getUnderlyingObject(P1
), *UO2
= getUnderlyingObject(P2
);
1012 // If we can't resolve the same pointers to the same object, then we can't
1013 // analyze them at all.
1017 // If the "Later" store is to a recognizable object, get its size.
1018 uint64_t ObjectSize
= getPointerSize(UO2
, DL
, TLI
, &F
);
1019 if (ObjectSize
!= MemoryLocation::UnknownSize
)
1020 if (ObjectSize
== LaterSize
&& ObjectSize
>= EarlierSize
)
1023 // Okay, we have stores to two completely different pointers. Try to
1024 // decompose the pointer into a "base + constant_offset" form. If the base
1025 // pointers are equal, then we can reason about the two stores.
1028 const Value
*BP1
= GetPointerBaseWithConstantOffset(P1
, EarlierOff
, DL
);
1029 const Value
*BP2
= GetPointerBaseWithConstantOffset(P2
, LaterOff
, DL
);
1031 // If the base pointers still differ, we have two completely different stores.
1035 // The later access completely overlaps the earlier store if and only if
1036 // both start and end of the earlier one is "inside" the later one:
1037 // |<->|--earlier--|<->|
1038 // |-------later-------|
1039 // Accesses may overlap if and only if start of one of them is "inside"
1041 // |<->|--earlier--|<----->|
1042 // |-------later-------|
1044 // |----- earlier -----|
1045 // |<->|---later---|<----->|
1047 // We have to be careful here as *Off is signed while *.Size is unsigned.
1049 // Check if the earlier access starts "not before" the later one.
1050 if (EarlierOff
>= LaterOff
) {
1051 // If the earlier access ends "not after" the later access then the earlier
1052 // one is completely overwritten by the later one.
1053 if (uint64_t(EarlierOff
- LaterOff
) + EarlierSize
<= LaterSize
)
1055 // If start of the earlier access is "before" end of the later access then
1056 // accesses overlap.
1057 else if ((uint64_t)(EarlierOff
- LaterOff
) < LaterSize
)
1058 return OW_MaybePartial
;
1060 // If start of the later access is "before" end of the earlier access then
1061 // accesses overlap.
1062 else if ((uint64_t)(LaterOff
- EarlierOff
) < EarlierSize
) {
1063 return OW_MaybePartial
;
1066 // Can reach here only if accesses are known not to overlap. There is no
1067 // dedicated code to indicate no overlap so signal "unknown".
1071 bool isInvisibleToCallerAfterRet(const Value
*V
) {
1072 if (isa
<AllocaInst
>(V
))
1074 auto I
= InvisibleToCallerAfterRet
.insert({V
, false});
1076 if (!isInvisibleToCallerBeforeRet(V
)) {
1077 I
.first
->second
= false;
1079 auto *Inst
= dyn_cast
<Instruction
>(V
);
1080 if (Inst
&& isAllocLikeFn(Inst
, &TLI
))
1081 I
.first
->second
= !PointerMayBeCaptured(V
, true, false);
1084 return I
.first
->second
;
1087 bool isInvisibleToCallerBeforeRet(const Value
*V
) {
1088 if (isa
<AllocaInst
>(V
))
1090 auto I
= InvisibleToCallerBeforeRet
.insert({V
, false});
1092 auto *Inst
= dyn_cast
<Instruction
>(V
);
1093 if (Inst
&& isAllocLikeFn(Inst
, &TLI
))
1094 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1095 // with the killing MemoryDef. But we refrain from doing so for now to
1096 // limit compile-time and this does not cause any changes to the number
1097 // of stores removed on a large test set in practice.
1098 I
.first
->second
= !PointerMayBeCaptured(V
, false, true);
1100 return I
.first
->second
;
1103 Optional
<MemoryLocation
> getLocForWriteEx(Instruction
*I
) const {
1104 if (!I
->mayWriteToMemory())
1107 if (auto *MTI
= dyn_cast
<AnyMemIntrinsic
>(I
))
1108 return {MemoryLocation::getForDest(MTI
)};
1110 if (auto *CB
= dyn_cast
<CallBase
>(I
)) {
1111 // If the functions may write to memory we do not know about, bail out.
1112 if (!CB
->onlyAccessesArgMemory() &&
1113 !CB
->onlyAccessesInaccessibleMemOrArgMem())
1117 if (TLI
.getLibFunc(*CB
, LF
) && TLI
.has(LF
)) {
1119 case LibFunc_strcpy
:
1120 case LibFunc_strncpy
:
1121 case LibFunc_strcat
:
1122 case LibFunc_strncat
:
1123 return {MemoryLocation::getAfter(CB
->getArgOperand(0))};
1128 switch (CB
->getIntrinsicID()) {
1129 case Intrinsic::init_trampoline
:
1130 return {MemoryLocation::getAfter(CB
->getArgOperand(0))};
1131 case Intrinsic::masked_store
:
1132 return {MemoryLocation::getForArgument(CB
, 1, TLI
)};
1139 return MemoryLocation::getOrNone(I
);
1142 /// Returns true if \p UseInst completely overwrites \p DefLoc
1143 /// (stored by \p DefInst).
1144 bool isCompleteOverwrite(const MemoryLocation
&DefLoc
, Instruction
*DefInst
,
1145 Instruction
*UseInst
) {
    // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
    // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
    // MemoryDef.
1149 if (!UseInst
->mayWriteToMemory())
1152 if (auto *CB
= dyn_cast
<CallBase
>(UseInst
))
1153 if (CB
->onlyAccessesInaccessibleMemory())
1156 int64_t InstWriteOffset
, DepWriteOffset
;
1157 if (auto CC
= getLocForWriteEx(UseInst
))
1158 return isOverwrite(UseInst
, DefInst
, *CC
, DefLoc
, DepWriteOffset
,
1159 InstWriteOffset
) == OW_Complete
;
1163 /// Returns true if \p Def is not read before returning from the function.
1164 bool isWriteAtEndOfFunction(MemoryDef
*Def
) {
1165 LLVM_DEBUG(dbgs() << " Check if def " << *Def
<< " ("
1166 << *Def
->getMemoryInst()
1167 << ") is at the end the function \n");
1169 auto MaybeLoc
= getLocForWriteEx(Def
->getMemoryInst());
1171 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1175 SmallVector
<MemoryAccess
*, 4> WorkList
;
1176 SmallPtrSet
<MemoryAccess
*, 8> Visited
;
1177 auto PushMemUses
= [&WorkList
, &Visited
](MemoryAccess
*Acc
) {
1178 if (!Visited
.insert(Acc
).second
)
1180 for (Use
&U
: Acc
->uses())
1181 WorkList
.push_back(cast
<MemoryAccess
>(U
.getUser()));
1184 for (unsigned I
= 0; I
< WorkList
.size(); I
++) {
1185 if (WorkList
.size() >= MemorySSAScanLimit
) {
1186 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1190 MemoryAccess
*UseAccess
= WorkList
[I
];
1191 // Simply adding the users of MemoryPhi to the worklist is not enough,
      // because we might miss read clobbers in different iterations of a loop,
      // for example.
1194 // TODO: Add support for phi translation to handle the loop case.
1195 if (isa
<MemoryPhi
>(UseAccess
))
1198 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1199 // of times this is called and/or caching it.
1200 Instruction
*UseInst
= cast
<MemoryUseOrDef
>(UseAccess
)->getMemoryInst();
1201 if (isReadClobber(*MaybeLoc
, UseInst
)) {
1202 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst
<< ".\n");
1206 if (MemoryDef
*UseDef
= dyn_cast
<MemoryDef
>(UseAccess
))
1207 PushMemUses(UseDef
);
1212 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1213 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1214 /// indicating whether \p I is a free-like call.
1215 Optional
<std::pair
<MemoryLocation
, bool>>
1216 getLocForTerminator(Instruction
*I
) const {
1219 if (match(I
, m_Intrinsic
<Intrinsic::lifetime_end
>(m_ConstantInt(Len
),
1221 return {std::make_pair(MemoryLocation(Ptr
, Len
), false)};
1223 if (auto *CB
= dyn_cast
<CallBase
>(I
)) {
1224 if (isFreeCall(I
, &TLI
))
1225 return {std::make_pair(MemoryLocation::getAfter(CB
->getArgOperand(0)),
1232 /// Returns true if \p I is a memory terminator instruction like
1233 /// llvm.lifetime.end or free.
1234 bool isMemTerminatorInst(Instruction
*I
) const {
1235 IntrinsicInst
*II
= dyn_cast
<IntrinsicInst
>(I
);
1236 return (II
&& II
->getIntrinsicID() == Intrinsic::lifetime_end
) ||
1237 isFreeCall(I
, &TLI
);
1240 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1241 /// instruction \p AccessI.
1242 bool isMemTerminator(const MemoryLocation
&Loc
, Instruction
*AccessI
,
1243 Instruction
*MaybeTerm
) {
1244 Optional
<std::pair
<MemoryLocation
, bool>> MaybeTermLoc
=
1245 getLocForTerminator(MaybeTerm
);
1250 // If the terminator is a free-like call, all accesses to the underlying
1251 // object can be considered terminated.
1252 if (getUnderlyingObject(Loc
.Ptr
) !=
1253 getUnderlyingObject(MaybeTermLoc
->first
.Ptr
))
1256 auto TermLoc
= MaybeTermLoc
->first
;
1257 if (MaybeTermLoc
->second
) {
1258 const Value
*LocUO
= getUnderlyingObject(Loc
.Ptr
);
1259 return BatchAA
.isMustAlias(TermLoc
.Ptr
, LocUO
);
1261 int64_t InstWriteOffset
, DepWriteOffset
;
1262 return isOverwrite(MaybeTerm
, AccessI
, TermLoc
, Loc
, DepWriteOffset
,
1263 InstWriteOffset
) == OW_Complete
;
1266 // Returns true if \p Use may read from \p DefLoc.
1267 bool isReadClobber(const MemoryLocation
&DefLoc
, Instruction
*UseInst
) {
1268 if (isNoopIntrinsic(UseInst
))
1271 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1272 // treated as read clobber.
1273 if (auto SI
= dyn_cast
<StoreInst
>(UseInst
))
1274 return isStrongerThan(SI
->getOrdering(), AtomicOrdering::Monotonic
);
1276 if (!UseInst
->mayReadFromMemory())
1279 if (auto *CB
= dyn_cast
<CallBase
>(UseInst
))
1280 if (CB
->onlyAccessesInaccessibleMemory())
1283 // NOTE: For calls, the number of stores removed could be slightly improved
1284 // by using AA.callCapturesBefore(UseInst, DefLoc, &DT), but that showed to
1285 // be expensive compared to the benefits in practice. For now, avoid more
1286 // expensive analysis to limit compile-time.
1287 return isRefSet(BatchAA
.getModRefInfo(UseInst
, DefLoc
));
1290 /// Returns true if a dependency between \p Current and \p KillingDef is
1291 /// guaranteed to be loop invariant for the loops that they are in. Either
1292 /// because they are known to be in the same block, in the same loop level or
1293 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1294 /// during execution of the containing function.
1295 bool isGuaranteedLoopIndependent(const Instruction
*Current
,
1296 const Instruction
*KillingDef
,
1297 const MemoryLocation
&CurrentLoc
) {
1298 // If the dependency is within the same block or loop level (being careful
1299 // of irreducible loops), we know that AA will return a valid result for the
1300 // memory dependency. (Both at the function level, outside of any loop,
1301 // would also be valid but we currently disable that to limit compile time).
1302 if (Current
->getParent() == KillingDef
->getParent())
1304 const Loop
*CurrentLI
= LI
.getLoopFor(Current
->getParent());
1305 if (!ContainsIrreducibleLoops
&& CurrentLI
&&
1306 CurrentLI
== LI
.getLoopFor(KillingDef
->getParent()))
1308 // Otherwise check the memory location is invariant to any loops.
1309 return isGuaranteedLoopInvariant(CurrentLoc
.Ptr
);
1312 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1313 /// loop. In particular, this guarantees that it only references a single
1314 /// MemoryLocation during execution of the containing function.
1315 bool isGuaranteedLoopInvariant(const Value
*Ptr
) {
1316 auto IsGuaranteedLoopInvariantBase
= [this](const Value
*Ptr
) {
1317 Ptr
= Ptr
->stripPointerCasts();
1318 if (auto *I
= dyn_cast
<Instruction
>(Ptr
)) {
1319 if (isa
<AllocaInst
>(Ptr
))
1322 if (isAllocLikeFn(I
, &TLI
))
1330 Ptr
= Ptr
->stripPointerCasts();
1331 if (auto *I
= dyn_cast
<Instruction
>(Ptr
)) {
1332 if (I
->getParent()->isEntryBlock())
1335 if (auto *GEP
= dyn_cast
<GEPOperator
>(Ptr
)) {
1336 return IsGuaranteedLoopInvariantBase(GEP
->getPointerOperand()) &&
1337 GEP
->hasAllConstantIndices();
1339 return IsGuaranteedLoopInvariantBase(Ptr
);
1342 // Find a MemoryDef writing to \p DefLoc and dominating \p StartAccess, with
1343 // no read access between them or on any other path to a function exit block
1344 // if \p DefLoc is not accessible after the function returns. If there is no
1345 // such MemoryDef, return None. The returned value may not (completely)
1346 // overwrite \p DefLoc. Currently we bail out when we encounter an aliasing
1347 // MemoryUse (read).
1348 Optional
<MemoryAccess
*>
1349 getDomMemoryDef(MemoryDef
*KillingDef
, MemoryAccess
*StartAccess
,
1350 const MemoryLocation
&DefLoc
, const Value
*DefUO
,
1351 unsigned &ScanLimit
, unsigned &WalkerStepLimit
,
1352 bool IsMemTerm
, unsigned &PartialLimit
) {
1353 if (ScanLimit
== 0 || WalkerStepLimit
== 0) {
1354 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1358 MemoryAccess
*Current
= StartAccess
;
1359 Instruction
*KillingI
= KillingDef
->getMemoryInst();
1360 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1362 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1363 Optional
<MemoryLocation
> CurrentLoc
;
1364 for (;; Current
= cast
<MemoryDef
>(Current
)->getDefiningAccess()) {
1366 dbgs() << " visiting " << *Current
;
1367 if (!MSSA
.isLiveOnEntryDef(Current
) && isa
<MemoryUseOrDef
>(Current
))
1368 dbgs() << " (" << *cast
<MemoryUseOrDef
>(Current
)->getMemoryInst()
1374 if (MSSA
.isLiveOnEntryDef(Current
)) {
1375 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1379 // Cost of a step. Accesses in the same block are more likely to be valid
1380 // candidates for elimination, hence consider them cheaper.
1381 unsigned StepCost
= KillingDef
->getBlock() == Current
->getBlock()
1382 ? MemorySSASameBBStepCost
1383 : MemorySSAOtherBBStepCost
;
1384 if (WalkerStepLimit
<= StepCost
) {
1385 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1388 WalkerStepLimit
-= StepCost
;
1390 // Return for MemoryPhis. They cannot be eliminated directly and the
1391 // caller is responsible for traversing them.
1392 if (isa
<MemoryPhi
>(Current
)) {
1393 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1397 // Below, check if CurrentDef is a valid candidate to be eliminated by
1398 // KillingDef. If it is not, check the next candidate.
1399 MemoryDef
*CurrentDef
= cast
<MemoryDef
>(Current
);
1400 Instruction
*CurrentI
= CurrentDef
->getMemoryInst();
1402 if (canSkipDef(CurrentDef
, !isInvisibleToCallerBeforeRet(DefUO
), TLI
))
1405 // Before we try to remove anything, check for any extra throwing
1406 // instructions that block us from DSEing
1407 if (mayThrowBetween(KillingI
, CurrentI
, DefUO
)) {
1408 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
1412 // Check for anything that looks like it will be a barrier to further
1414 if (isDSEBarrier(DefUO
, CurrentI
)) {
1415 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
      // If Current is known to be on a path that reads DefLoc or is a read
      // clobber, bail out, as the path is not profitable. We skip this check
      // for intrinsic calls, because the code knows how to handle memcpy
      // intrinsics.
1423 if (!isa
<IntrinsicInst
>(CurrentI
) && isReadClobber(DefLoc
, CurrentI
))
1426 // Quick check if there are direct uses that are read-clobbers.
1427 if (any_of(Current
->uses(), [this, &DefLoc
, StartAccess
](Use
&U
) {
1428 if (auto *UseOrDef
= dyn_cast
<MemoryUseOrDef
>(U
.getUser()))
1429 return !MSSA
.dominates(StartAccess
, UseOrDef
) &&
1430 isReadClobber(DefLoc
, UseOrDef
->getMemoryInst());
1433 LLVM_DEBUG(dbgs() << " ... found a read clobber\n");
      // If Current cannot be analyzed or is not removable, check the next
      // candidate.
1439 if (!hasAnalyzableMemoryWrite(CurrentI
, TLI
) || !isRemovable(CurrentI
))
1442 // If Current does not have an analyzable write location, skip it
1443 CurrentLoc
= getLocForWriteEx(CurrentI
);
1447 // AliasAnalysis does not account for loops. Limit elimination to
1448 // candidates for which we can guarantee they always store to the same
      // memory location and are not located in different loops.
1450 if (!isGuaranteedLoopIndependent(CurrentI
, KillingI
, *CurrentLoc
)) {
1451 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n");
1452 WalkerStepLimit
-= 1;
1457 // If the killing def is a memory terminator (e.g. lifetime.end), check
      // the next candidate if Current does not write to the same underlying
      // object as the terminator.
1460 if (!isMemTerminator(*CurrentLoc
, CurrentI
, KillingI
))
1463 int64_t InstWriteOffset
, DepWriteOffset
;
1464 auto OR
= isOverwrite(KillingI
, CurrentI
, DefLoc
, *CurrentLoc
,
1465 DepWriteOffset
, InstWriteOffset
);
1466 // If Current does not write to the same object as KillingDef, check
1467 // the next candidate.
1468 if (OR
== OW_Unknown
)
1470 else if (OR
== OW_MaybePartial
) {
1471 // If KillingDef only partially overwrites Current, check the next
1472 // candidate if the partial step limit is exceeded. This aggressively
1473 // limits the number of candidates for partial store elimination,
1474 // which are less likely to be removable in the end.
1475 if (PartialLimit
<= 1) {
1476 WalkerStepLimit
-= 1;
1485 // Accesses to objects accessible after the function returns can only be
1486 // eliminated if the access is killed along all paths to the exit. Collect
    // the blocks with killing (=completely overwriting) MemoryDefs and check if
1488 // they cover all paths from EarlierAccess to any function exit.
1489 SmallPtrSet
<Instruction
*, 16> KillingDefs
;
1490 KillingDefs
.insert(KillingDef
->getMemoryInst());
1491 MemoryAccess
*EarlierAccess
= Current
;
1492 Instruction
*EarlierMemInst
=
1493 cast
<MemoryDef
>(EarlierAccess
)->getMemoryInst();
1494 LLVM_DEBUG(dbgs() << " Checking for reads of " << *EarlierAccess
<< " ("
1495 << *EarlierMemInst
<< ")\n");
1497 SmallSetVector
<MemoryAccess
*, 32> WorkList
;
1498 auto PushMemUses
= [&WorkList
](MemoryAccess
*Acc
) {
1499 for (Use
&U
: Acc
->uses())
1500 WorkList
.insert(cast
<MemoryAccess
>(U
.getUser()));
1502 PushMemUses(EarlierAccess
);
1504 // Check if EarlierDef may be read.
1505 for (unsigned I
= 0; I
< WorkList
.size(); I
++) {
1506 MemoryAccess
*UseAccess
= WorkList
[I
];
1508 LLVM_DEBUG(dbgs() << " " << *UseAccess
);
1509 // Bail out if the number of accesses to check exceeds the scan limit.
1510 if (ScanLimit
< (WorkList
.size() - I
)) {
1511 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1515 NumDomMemDefChecks
++;
1517 if (isa
<MemoryPhi
>(UseAccess
)) {
1518 if (any_of(KillingDefs
, [this, UseAccess
](Instruction
*KI
) {
1519 return DT
.properlyDominates(KI
->getParent(),
1520 UseAccess
->getBlock());
1522 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1525 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1526 PushMemUses(UseAccess
);
1530 Instruction
*UseInst
= cast
<MemoryUseOrDef
>(UseAccess
)->getMemoryInst();
1531 LLVM_DEBUG(dbgs() << " (" << *UseInst
<< ")\n");
1533 if (any_of(KillingDefs
, [this, UseInst
](Instruction
*KI
) {
1534 return DT
.dominates(KI
, UseInst
);
1536 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
1542 if (isMemTerminator(*CurrentLoc
, EarlierMemInst
, UseInst
)) {
1545 << " ... skipping, memterminator invalidates following accesses\n");
1549 if (isNoopIntrinsic(cast
<MemoryUseOrDef
>(UseAccess
)->getMemoryInst())) {
1550 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1551 PushMemUses(UseAccess
);
1555 if (UseInst
->mayThrow() && !isInvisibleToCallerBeforeRet(DefUO
)) {
1556 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");

      // Uses which may read the original MemoryDef mean we cannot eliminate the
      // original MD. Stop walk.
      if (isReadClobber(*CurrentLoc, UseInst)) {
        LLVM_DEBUG(dbgs() << " ... found read clobber\n");
        return None;
      }

      // If this worklist walks back to the original memory access (and the
      // pointer is not guaranteed loop invariant) then we cannot assume that a
      // store kills itself.
      if (EarlierAccess == UseAccess &&
          !isGuaranteedLoopInvariant(CurrentLoc->Ptr)) {
        LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
        return None;
      }

      // Otherwise, for the KillingDef and EarlierAccess we only have to check
      // if it reads the memory location.
      // TODO: It would probably be better to check for self-reads before
      // calling the function.
      if (KillingDef == UseAccess || EarlierAccess == UseAccess) {
        LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
        continue;
      }

      // Check all uses for MemoryDefs, except for defs completely overwriting
      // the original location. Otherwise we have to check uses of *all*
      // MemoryDefs we discover, including non-aliasing ones; otherwise we might
      // miss cases like the following:
      //   1 = Def(LoE) ; <----- EarlierDef stores [0,1]
      //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
      //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
      //                  (The Use points to the *first* Def it may alias)
      //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
      //                  stores [0,1]
      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
        if (isCompleteOverwrite(*CurrentLoc, EarlierMemInst, UseInst)) {
          BasicBlock *MaybeKillingBlock = UseInst->getParent();
          if (PostOrderNumbers.find(MaybeKillingBlock)->second <
              PostOrderNumbers.find(EarlierAccess->getBlock())->second) {
            if (!isInvisibleToCallerAfterRet(DefUO)) {
              LLVM_DEBUG(dbgs()
                         << " ... found killing def " << *UseInst << "\n");
              KillingDefs.insert(UseInst);
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << " ... found preceding def " << *UseInst << "\n");
            return None;
          }
        } else
          PushMemUses(UseDef);
      }
    }

    // For accesses to locations visible after the function returns, make sure
    // that the location is killed (=overwritten) along all paths from
    // EarlierAccess to the exit.
    if (!isInvisibleToCallerAfterRet(DefUO)) {
      SmallPtrSet<BasicBlock *, 16> KillingBlocks;
      for (Instruction *KD : KillingDefs)
        KillingBlocks.insert(KD->getParent());
      assert(!KillingBlocks.empty() &&
             "Expected at least a single killing block");

      // Find the common post-dominator of all killing blocks.
      BasicBlock *CommonPred = *KillingBlocks.begin();
      for (auto I = std::next(KillingBlocks.begin()), E = KillingBlocks.end();
           I != E && CommonPred; I++)
        CommonPred = PDT.findNearestCommonDominator(CommonPred, *I);

      // If CommonPred is in the set of killing blocks, just check if it
      // post-dominates EarlierAccess.
      if (KillingBlocks.count(CommonPred)) {
        if (PDT.dominates(CommonPred, EarlierAccess->getBlock()))
          return {EarlierAccess};
        return None;
      }

      // If the common post-dominator does not post-dominate EarlierAccess,
      // there is a path from EarlierAccess to an exit not going through a
      // killing block.
      if (PDT.dominates(CommonPred, EarlierAccess->getBlock())) {
        SetVector<BasicBlock *> WorkList;

        // If CommonPred is null, there are multiple exits from the function.
        // They all have to be added to the worklist.
        if (CommonPred)
          WorkList.insert(CommonPred);
        else
          for (BasicBlock *R : PDT.roots())
            WorkList.insert(R);

        // Check if all paths starting from an exit node go through one of the
        // killing blocks before reaching EarlierAccess.
        for (unsigned I = 0; I < WorkList.size(); I++) {
          BasicBlock *Current = WorkList[I];
          if (KillingBlocks.count(Current))
            continue;
          if (Current == EarlierAccess->getBlock())
            return None;

          // EarlierAccess is reachable from the entry, so we don't have to
          // explore unreachable blocks further.
          if (!DT.isReachableFromEntry(Current))
            continue;

          for (BasicBlock *Pred : predecessors(Current))
            WorkList.insert(Pred);

          if (WorkList.size() >= MemorySSAPathCheckLimit)
            return None;
        }
        return {EarlierAccess};
      }
      return None;
    }

    // No aliasing MemoryUses of EarlierAccess found, EarlierAccess is
    // potentially dead.
    return {EarlierAccess};
  }
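
  // Illustrative example (added; hypothetical IR, not from the original
  // source) for the all-paths check above: with @g visible after the function
  // returns, the store in %entry is only removable because every exit path
  // contains a killing store:
  //   entry: store i32 0, i32* @g        ; EarlierAccess
  //          br i1 %c, label %a, label %b
  //   a:     store i32 1, i32* @g        ; killing def
  //          ret void
  //   b:     store i32 2, i32* @g        ; killing def
  //          ret void
  // If %b returned without writing @g, the CFG walk would reach %entry from an
  // exit and the earlier store would be kept.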

  // Delete dead memory defs
  void deleteDeadInstruction(Instruction *SI) {
    MemorySSAUpdater Updater(&MSSA);
    SmallVector<Instruction *, 32> NowDeadInsts;
    NowDeadInsts.push_back(SI);

    while (!NowDeadInsts.empty()) {
      Instruction *DeadInst = NowDeadInsts.pop_back_val();

      // Try to preserve debug information attached to the dead instruction.
      salvageDebugInfo(*DeadInst);
      salvageKnowledge(DeadInst);

      // Remove the Instruction from MSSA.
      if (MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst)) {
        if (MemoryDef *MD = dyn_cast<MemoryDef>(MA)) {
          SkipStores.insert(MD);
        }
        Updater.removeMemoryAccess(MA);
      }

      auto I = IOLs.find(DeadInst->getParent());
      if (I != IOLs.end())
        I->second.erase(DeadInst);
      // Remove its operands.
      for (Use &O : DeadInst->operands())
        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
          O = nullptr;
          if (isInstructionTriviallyDead(OpI, &TLI))
            NowDeadInsts.push_back(OpI);
        }

      DeadInst->eraseFromParent();
    }
  }
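
  // Note (added, not from the original source): deleting a dead store can make
  // its operand chain trivially dead as well. For example, for the hypothetical
  // IR
  //   %p = getelementptr inbounds i32, i32* %base, i64 4
  //   store i32 0, i32* %p       ; dead store
  // erasing the store leaves %p unused, so it is pushed onto NowDeadInsts and
  // erased on a later iteration of the loop above.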

  // Check for any extra throws between SI and NI that block DSE. This only
  // checks extra maythrows (those that aren't MemoryDefs). MemoryDefs that may
  // throw are handled during the walk from one def to the next.
  bool mayThrowBetween(Instruction *SI, Instruction *NI,
                       const Value *SILocUnd) {
    // First see if we can ignore it by using the fact that SI is an
    // alloca/alloca-like object that is not visible to the caller during
    // execution of the function.
    if (SILocUnd && isInvisibleToCallerBeforeRet(SILocUnd))
      return false;

    if (SI->getParent() == NI->getParent())
      return ThrowingBlocks.count(SI->getParent());
    return !ThrowingBlocks.empty();
  }
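
  // Illustrative example (added; hypothetical IR, not from the original
  // source): a potentially throwing call between two stores blocks DSE when
  // the underlying object is visible to the caller:
  //   store i32 0, i32* @g
  //   call void @may_throw()     ; if this unwinds, the caller can observe @g
  //   store i32 1, i32* @g
  // For a non-escaping alloca the same pattern would not be a problem.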

  // Check if \p NI acts as a DSE barrier for \p SI. The following instructions
  // act as barriers:
  //  * A memory instruction that may throw and \p SI accesses a non-stack
  //    object.
  //  * Atomic stores stronger than monotonic.
  bool isDSEBarrier(const Value *SILocUnd, Instruction *NI) {
    // If NI may throw it acts as a barrier, unless SI accesses an
    // alloca/alloca-like object that does not escape.
    if (NI->mayThrow() && !isInvisibleToCallerBeforeRet(SILocUnd))
      return true;

    // If NI is an atomic load/store stronger than monotonic, do not try to
    // eliminate/reorder it.
    if (NI->isAtomic()) {
      if (auto *LI = dyn_cast<LoadInst>(NI))
        return isStrongerThanMonotonic(LI->getOrdering());
      if (auto *SI = dyn_cast<StoreInst>(NI))
        return isStrongerThanMonotonic(SI->getOrdering());
      if (auto *ARMW = dyn_cast<AtomicRMWInst>(NI))
        return isStrongerThanMonotonic(ARMW->getOrdering());
      if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(NI))
        return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
               isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
      llvm_unreachable("other instructions should be skipped in MemorySSA");
    }
    return false;
  }
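
  // Illustrative example (added; hypothetical IR, not from the original
  // source): an atomic store with release ordering is stronger than monotonic
  // and is therefore treated as a barrier here:
  //   store i32 0, i32* %p
  //   store atomic i32 1, i32* @flag release, align 4   ; DSE barrier
  //   store i32 2, i32* %p
  // Unordered and monotonic atomics do not act as barriers in this check.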

  /// Eliminate writes to objects that are not visible in the caller and are not
  /// accessed before returning from the function.
  bool eliminateDeadWritesAtEndOfFunction() {
    bool MadeChange = false;
    LLVM_DEBUG(
        dbgs()
        << "Trying to eliminate MemoryDefs at the end of the function\n");
    for (int I = MemDefs.size() - 1; I >= 0; I--) {
      MemoryDef *Def = MemDefs[I];
      if (SkipStores.contains(Def) || !isRemovable(Def->getMemoryInst()))
        continue;

      Instruction *DefI = Def->getMemoryInst();
      auto DefLoc = getLocForWriteEx(DefI);
      if (!DefLoc)
        continue;

      // NOTE: Currently eliminating writes at the end of a function is limited
      // to MemoryDefs with a single underlying object, to save compile-time. In
      // practice it appears the case with multiple underlying objects is very
      // uncommon. If it turns out to be important, we can use
      // getUnderlyingObjects here instead.
      const Value *UO = getUnderlyingObject(DefLoc->Ptr);
      if (!UO || !isInvisibleToCallerAfterRet(UO))
        continue;

      if (isWriteAtEndOfFunction(Def)) {
        // See through pointer-to-pointer bitcasts.
        LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
                             "of the function\n");
        deleteDeadInstruction(DefI);
        MadeChange = true;
      }
    }
    return MadeChange;
  }
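
  // Illustrative example (added; hypothetical IR, not from the original
  // source): a store into a non-escaping alloca that is never read before the
  // function returns is removed here:
  //   %buf = alloca [16 x i8]
  //   %p   = getelementptr inbounds [16 x i8], [16 x i8]* %buf, i64 0, i64 0
  //   store i8 42, i8* %p        ; no later read of %buf
  //   ret void
  // Writes to memory still visible after the return (globals, escaped
  // pointers) are not considered by this function.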

  /// \returns true if \p Def is a no-op store, either because it
  /// directly stores back a loaded value or stores zero to a calloced object.
  bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
    StoreInst *Store = dyn_cast<StoreInst>(Def->getMemoryInst());
    MemSetInst *MemSet = dyn_cast<MemSetInst>(Def->getMemoryInst());
    Constant *StoredConstant = nullptr;
    if (Store)
      StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
    else if (MemSet)
      StoredConstant = dyn_cast<Constant>(MemSet->getValue());

    if (StoredConstant && StoredConstant->isNullValue()) {
      auto *DefUOInst = dyn_cast<Instruction>(DefUO);
      if (DefUOInst) {
        if (isCallocLikeFn(DefUOInst, &TLI)) {
          auto *UnderlyingDef =
              cast<MemoryDef>(MSSA.getMemoryAccess(DefUOInst));
          // If UnderlyingDef is the clobbering access of Def, no instructions
          // between them can modify the memory location.
          auto *ClobberDef =
              MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def);
          return UnderlyingDef == ClobberDef;
        }
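
        // Illustrative example (added; hypothetical IR, not from the original
        // source): storing zero into freshly calloc'ed memory is a no-op as
        // long as nothing can have modified the allocation in between:
        //   %m = call i8* @calloc(i64 1, i64 64)
        //   %p = bitcast i8* %m to i32*
        //   store i32 0, i32* %p    ; memory is already zero, removable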

        if (MemSet) {
          if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
              F.hasFnAttribute(Attribute::SanitizeAddress) ||
              F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
              F.getName() == "calloc")
            return false;
          auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUOInst));
          if (!Malloc)
            return false;
          auto *InnerCallee = Malloc->getCalledFunction();
          if (!InnerCallee)
            return false;
          LibFunc Func;
          if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
              Func != LibFunc_malloc)
            return false;

          if (Malloc->getOperand(0) == MemSet->getLength()) {
            if (DT.dominates(Malloc, MemSet) &&
                memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT)) {
              IRBuilder<> IRB(Malloc);
              const auto &DL = Malloc->getModule()->getDataLayout();
              if (auto *Calloc =
                      emitCalloc(ConstantInt::get(IRB.getIntPtrTy(DL), 1),
                                 Malloc->getArgOperand(0), IRB, TLI)) {
                MemorySSAUpdater Updater(&MSSA);
                auto *LastDef = cast<MemoryDef>(
                    Updater.getMemorySSA()->getMemoryAccess(Malloc));
                auto *NewAccess = Updater.createMemoryAccessAfter(
                    cast<Instruction>(Calloc), LastDef, LastDef);
                auto *NewAccessMD = cast<MemoryDef>(NewAccess);
                Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
                Updater.removeMemoryAccess(Malloc);
                Malloc->replaceAllUsesWith(Calloc);
                Malloc->eraseFromParent();
                return true;
              }
              return false;
            }
          }
        }
      }
    }

    if (!Store)
      return false;
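
    // Illustrative before/after sketch (added, not from the original source):
    //   %m = call i8* @malloc(i64 %n)
    //   call void @llvm.memset.p0i8.i64(i8* %m, i8 0, i64 %n, i1 false)
    // can be rewritten, when the memset covers the whole allocation and
    // nothing modifies the memory in between, to
    //   %m = call i8* @calloc(i64 1, i64 %n)
    // after which the memset itself is a removable store of zero.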

    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
        // Get the defining access for the load.
        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
        // Fast path: the defining accesses are the same.
        if (LoadAccess == Def->getDefiningAccess())
          return true;

        // Look through phi accesses. Recursively scan all phi accesses by
        // adding them to a worklist. Bail when we run into a memory def that
        // does not match LoadAccess.
        SetVector<MemoryAccess *> ToCheck;
        MemoryAccess *Current =
            MSSA.getWalker()->getClobberingMemoryAccess(Def);
        // We don't want to bail when we run into the store memory def. But,
        // the phi access may point to it. So, pretend like we've already
        // checked it.
        ToCheck.insert(Def);
        ToCheck.insert(Current);
        // Start at current (1) to simulate already having checked Def.
        for (unsigned I = 1; I < ToCheck.size(); ++I) {
          Current = ToCheck[I];
          if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
            // Check all the operands.
            for (auto &Use : PhiAccess->incoming_values())
              ToCheck.insert(cast<MemoryAccess>(&Use));
            continue;
          }

          // If we found a memory def, bail. This happens when we have an
          // unrelated write in between an otherwise noop store.
          assert(isa<MemoryDef>(Current) &&
                 "Only MemoryDefs should reach here.");
          // TODO: Skip no alias MemoryDefs that have no aliasing reads.
          // We are searching for the definition of the store's destination.
          // So, if that is the same definition as the load, then this is a
          // noop. Otherwise, fail.
          if (LoadAccess != Current)
            return false;
        }
        return true;
      }
    }

    return false;
  }
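
  // Illustrative example (added; hypothetical IR, not from the original
  // source): storing back a value that was just loaded from the same location
  // is a no-op when no other write clobbers the location in between:
  //   %v = load i32, i32* %p
  //   store i32 %v, i32* %p      ; removable, memory already holds %v
  // If an unrelated MemoryDef for %p sits between the load and the store, the
  // walk above bails out and the store is kept.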

static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                                DominatorTree &DT, PostDominatorTree &PDT,
                                const TargetLibraryInfo &TLI,
                                const LoopInfo &LI) {
  bool MadeChange = false;

  DSEState State = DSEState::get(F, AA, MSSA, DT, PDT, TLI, LI);
  // For each store:
  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
    MemoryDef *KillingDef = State.MemDefs[I];
    if (State.SkipStores.count(KillingDef))
      continue;
    Instruction *SI = KillingDef->getMemoryInst();

    Optional<MemoryLocation> MaybeSILoc;
    if (State.isMemTerminatorInst(SI))
      MaybeSILoc = State.getLocForTerminator(SI).map(
          [](const std::pair<MemoryLocation, bool> &P) { return P.first; });
    else
      MaybeSILoc = State.getLocForWriteEx(SI);

    if (!MaybeSILoc) {
      LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
                        << *SI << "\n");
      continue;
    }
    MemoryLocation SILoc = *MaybeSILoc;
    assert(SILoc.Ptr && "SILoc should not be null");
    const Value *SILocUnd = getUnderlyingObject(SILoc.Ptr);

    MemoryAccess *Current = KillingDef;
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
                      << *Current << " (" << *SI << ")\n");

    unsigned ScanLimit = MemorySSAScanLimit;
    unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
    unsigned PartialLimit = MemorySSAPartialStoreLimit;
    // Worklist of MemoryAccesses that may be killed by KillingDef.
    SetVector<MemoryAccess *> ToCheck;
    ToCheck.insert(KillingDef->getDefiningAccess());

    bool Shortend = false;
    bool IsMemTerm = State.isMemTerminatorInst(SI);
    // Check if MemoryAccesses in the worklist are killed by KillingDef.
    for (unsigned I = 0; I < ToCheck.size(); I++) {
      Current = ToCheck[I];
      if (State.SkipStores.count(Current))
        continue;

      Optional<MemoryAccess *> Next = State.getDomMemoryDef(
          KillingDef, Current, SILoc, SILocUnd, ScanLimit, WalkerStepLimit,
          IsMemTerm, PartialLimit);

      if (!Next) {
        LLVM_DEBUG(dbgs() << " finished walk\n");
        continue;
      }

      MemoryAccess *EarlierAccess = *Next;
      LLVM_DEBUG(dbgs() << " Checking if we can kill " << *EarlierAccess);
      if (isa<MemoryPhi>(EarlierAccess)) {
        LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
        for (Value *V : cast<MemoryPhi>(EarlierAccess)->incoming_values()) {
          MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
          BasicBlock *IncomingBlock = IncomingAccess->getBlock();
          BasicBlock *PhiBlock = EarlierAccess->getBlock();

          // We only consider incoming MemoryAccesses that come before the
          // MemoryPhi. Otherwise we could discover candidates that do not
          // strictly dominate our starting def.
          if (State.PostOrderNumbers[IncomingBlock] >
              State.PostOrderNumbers[PhiBlock])
            ToCheck.insert(IncomingAccess);
        }
        continue;
      }
      auto *NextDef = cast<MemoryDef>(EarlierAccess);
      Instruction *NI = NextDef->getMemoryInst();
      LLVM_DEBUG(dbgs() << " (" << *NI << ")\n");
      ToCheck.insert(NextDef->getDefiningAccess());
      NumGetDomMemoryDefPassed++;
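
      // Note (added, not from the original source): for a diamond CFG
      //   A -> {B, C},  B -> D,  C -> D
      // a MemoryPhi in D merges the MemoryDefs from B and C. Their blocks have
      // higher post-order numbers than D, so both incoming accesses are added,
      // while an incoming access from a block after the phi (e.g. a loop
      // backedge) would be skipped by the comparison above.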

      if (!DebugCounter::shouldExecute(MemorySSACounter))
        continue;

      MemoryLocation NILoc = *State.getLocForWriteEx(NI);

      if (IsMemTerm) {
        const Value *NIUnd = getUnderlyingObject(NILoc.Ptr);
        if (SILocUnd != NIUnd)
          continue;
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
                          << "\n KILLER: " << *SI << '\n');
        State.deleteDeadInstruction(NI);
        ++NumFastStores;
        MadeChange = true;
      } else {
        // Check if NI overwrites SI.
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = State.isOverwrite(SI, NI, SILoc, NILoc,
                                               DepWriteOffset, InstWriteOffset);
        if (OR == OW_MaybePartial) {
          auto Iter = State.IOLs.insert(
              std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
                  NI->getParent(), InstOverlapIntervalsTy()));
          auto &IOL = Iter.first->second;
          OR = isPartialOverwrite(SILoc, NILoc, DepWriteOffset, InstWriteOffset,
                                  NI, IOL);
        }

        if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(NI);
          auto *Later = dyn_cast<StoreInst>(SI);
          // We are re-using tryToMergePartialOverlappingStores, which requires
          // Earlier to dominate Later.
          // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
          if (Earlier && Later && DT.dominates(Earlier, Later)) {
            if (Constant *Merged = tryToMergePartialOverlappingStores(
                    Earlier, Later, InstWriteOffset, DepWriteOffset, State.DL,
                    State.BatchAA, &DT)) {

              // Update stored value of earlier store to merged constant.
              Earlier->setOperand(0, Merged);
              ++NumModifiedStores;
              MadeChange = true;

              Shortend = true;
              // Remove later store and remove any outstanding overlap intervals
              // for the updated store.
              State.deleteDeadInstruction(Later);
              auto I = State.IOLs.find(Earlier->getParent());
              if (I != State.IOLs.end())
                I->second.erase(Earlier);
              break;
            }
          }
        }
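
        // Illustrative example (added; hypothetical IR, not from the original
        // source, little-endian layout assumed): a later store that overwrites
        // part of an earlier constant store can be folded into the earlier one:
        //   store i32 0x11223344, i32* %p    ; earlier (NI)
        //   store i16 0xAABB, i16* %p16      ; later (SI), bytes [0,2) of %p
        // may be merged to
        //   store i32 0x1122AABB, i32* %p
        // and the later store is then deleted via deleteDeadInstruction.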

        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *NI
                            << "\n KILLER: " << *SI << '\n');
          State.deleteDeadInstruction(NI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    }

    // Check if the store is a no-op.
    if (!Shortend && isRemovable(SI) &&
        State.storeIsNoop(KillingDef, SILocUnd)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *SI << '\n');
      State.deleteDeadInstruction(SI);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }
  }

  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= removePartiallyOverlappedStores(State.DL, KV.second, TLI);

  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
  return MadeChange;
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  LoopInfo &LI = AM.getResult<LoopAnalysis>(F);

  bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}
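
// Usage note (added, not from the original source): with the new pass manager
// this pass can be run directly, e.g.
//   opt -passes=dse -S input.ll
// which invokes DSEPass::run above for each function in the module.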

namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    AliasAnalysis &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
    DominatorTree &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    const TargetLibraryInfo &TLI =
        getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
    MemorySSA &MSSA = getAnalysis<MemorySSAWrapperPass>().getMSSA();
    PostDominatorTree &PDT =
        getAnalysis<PostDominatorTreeWrapperPass>().getPostDomTree();
    LoopInfo &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();

    bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
    if (AreStatisticsEnabled())
      for (auto &I : instructions(F))
        NumRemainingStores += isa<StoreInst>(&I);
#endif

    return Changed;
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addRequired<PostDominatorTreeWrapperPass>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.addPreserved<PostDominatorTreeWrapperPass>();
    AU.addPreserved<MemorySSAWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
  }
};

} // end anonymous namespace

char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(PostDominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}