1 //===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // The code below implements dead store elimination using MemorySSA. It uses
10 // the following general approach: given a MemoryDef, walk upwards to find
11 // clobbering MemoryDefs that may be killed by the starting def. Then check
12 // that there are no uses that may read the location of the original MemoryDef
13 // in between both MemoryDefs. A bit more concretely:
15 // For all MemoryDefs StartDef:
16 // 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking
17 // upwards.
18 // 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
19 // checking all uses starting at MaybeDeadAccess and walking until we see
20 // StartDef.
21 // 3. For each found CurrentDef, check that:
22 // 1. There are no barrier instructions between CurrentDef and StartDef (like
23 // throws or stores with ordering constraints).
24 // 2. StartDef is executed whenever CurrentDef is executed.
25 // 3. StartDef completely overwrites CurrentDef.
26 // 4. Erase CurrentDef from the function and MemorySSA.
28 //===----------------------------------------------------------------------===//
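//
// For illustration only (a hypothetical snippet, not taken from a test case),
// given IR such as:
//
//   store i32 0, ptr %p                  ; MaybeDeadAccess
//   call void @g() memory(none)          ; neither reads nor writes %p
//   store i32 1, ptr %p                  ; StartDef, completely overwrites %p
//
// the first store is dead and can be erased, because nothing in between may
// read the stored location and the second store overwrites it completely.
//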
30 #include "llvm/Transforms/Scalar/DeadStoreElimination.h"
31 #include "llvm/ADT/APInt.h"
32 #include "llvm/ADT/DenseMap.h"
33 #include "llvm/ADT/MapVector.h"
34 #include "llvm/ADT/PostOrderIterator.h"
35 #include "llvm/ADT/SetVector.h"
36 #include "llvm/ADT/SmallPtrSet.h"
37 #include "llvm/ADT/SmallVector.h"
38 #include "llvm/ADT/Statistic.h"
39 #include "llvm/ADT/StringRef.h"
40 #include "llvm/Analysis/AliasAnalysis.h"
41 #include "llvm/Analysis/CaptureTracking.h"
42 #include "llvm/Analysis/GlobalsModRef.h"
43 #include "llvm/Analysis/LoopInfo.h"
44 #include "llvm/Analysis/MemoryBuiltins.h"
45 #include "llvm/Analysis/MemoryLocation.h"
46 #include "llvm/Analysis/MemorySSA.h"
47 #include "llvm/Analysis/MemorySSAUpdater.h"
48 #include "llvm/Analysis/MustExecute.h"
49 #include "llvm/Analysis/PostDominators.h"
50 #include "llvm/Analysis/TargetLibraryInfo.h"
51 #include "llvm/Analysis/ValueTracking.h"
52 #include "llvm/IR/Argument.h"
53 #include "llvm/IR/BasicBlock.h"
54 #include "llvm/IR/Constant.h"
55 #include "llvm/IR/Constants.h"
56 #include "llvm/IR/DataLayout.h"
57 #include "llvm/IR/DebugInfo.h"
58 #include "llvm/IR/Dominators.h"
59 #include "llvm/IR/Function.h"
60 #include "llvm/IR/IRBuilder.h"
61 #include "llvm/IR/InstIterator.h"
62 #include "llvm/IR/InstrTypes.h"
63 #include "llvm/IR/Instruction.h"
64 #include "llvm/IR/Instructions.h"
65 #include "llvm/IR/IntrinsicInst.h"
66 #include "llvm/IR/Module.h"
67 #include "llvm/IR/PassManager.h"
68 #include "llvm/IR/PatternMatch.h"
69 #include "llvm/IR/Value.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CommandLine.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/DebugCounter.h"
74 #include "llvm/Support/ErrorHandling.h"
75 #include "llvm/Support/raw_ostream.h"
76 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
77 #include "llvm/Transforms/Utils/BuildLibCalls.h"
78 #include "llvm/Transforms/Utils/Local.h"
79 #include <algorithm>
80 #include <cassert>
81 #include <cstdint>
82 #include <iterator>
83 #include <map>
84 #include <optional>
85 #include <utility>
87 using namespace llvm;
88 using namespace PatternMatch;
90 #define DEBUG_TYPE "dse"
92 STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
93 STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
94 STATISTIC(NumFastStores, "Number of stores deleted");
95 STATISTIC(NumFastOther, "Number of other instrs removed");
96 STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
97 STATISTIC(NumModifiedStores, "Number of stores modified");
98 STATISTIC(NumCFGChecks, "Number of checks for paths covered by killing blocks");
99 STATISTIC(NumCFGTries, "Number of explicit CFG walks attempted for such checks");
100 STATISTIC(NumCFGSuccess, "Number of CFG walks that proved all paths are covered");
101 STATISTIC(NumGetDomMemoryDefPassed,
102 "Number of times a valid candidate is returned from getDomMemoryDef");
103 STATISTIC(NumDomMemDefChecks,
104 "Number iterations check for reads in getDomMemoryDef");
106 DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
107 "Controls which MemoryDefs are eliminated.");
109 static cl::opt<bool>
110 EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
111 cl::init(true), cl::Hidden,
112 cl::desc("Enable partial-overwrite tracking in DSE"));
114 static cl::opt<bool>
115 EnablePartialStoreMerging("enable-dse-partial-store-merging",
116 cl::init(true), cl::Hidden,
117 cl::desc("Enable partial store merging in DSE"));
119 static cl::opt<unsigned>
120 MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
121 cl::desc("The number of memory instructions to scan for "
122 "dead store elimination (default = 150)"));
123 static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
124 "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
125 cl::desc("The maximum number of steps while walking upwards to find "
126 "MemoryDefs that may be killed (default = 90)"));
128 static cl::opt<unsigned> MemorySSAPartialStoreLimit(
129 "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
130 cl::desc("The maximum number candidates that only partially overwrite the "
131 "killing MemoryDef to consider"
132 " (default = 5)"));
134 static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
135 "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
136 cl::desc("The number of MemoryDefs we consider as candidates to eliminated "
137 "other stores per basic block (default = 5000)"));
139 static cl::opt<unsigned> MemorySSASameBBStepCost(
140 "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
141 cl::desc(
142 "The cost of a step in the same basic block as the killing MemoryDef"
143 "(default = 1)"));
145 static cl::opt<unsigned>
146 MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
147 cl::Hidden,
148 cl::desc("The cost of a step in a different basic "
149 "block than the killing MemoryDef"
150 "(default = 5)"));
152 static cl::opt<unsigned> MemorySSAPathCheckLimit(
153 "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
154 cl::desc("The maximum number of blocks to check when trying to prove that "
155 "all paths to an exit go through a killing block (default = 50)"));
157 // This flag allows or disallows DSE to optimize MemorySSA during its
158 // traversal. Note that DSE optimizing MemorySSA may impact other passes
159 // downstream of the DSE invocation and can lead to issues not being
160 // reproducible in isolation (i.e. when MemorySSA is built from scratch). In
161 // those cases, the flag can be used to check if DSE's MemorySSA optimizations
162 // impact follow-up passes.
163 static cl::opt<bool>
164 OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden,
165 cl::desc("Allow DSE to optimize memory accesses."));
167 //===----------------------------------------------------------------------===//
168 // Helper functions
169 //===----------------------------------------------------------------------===//
170 using OverlapIntervalsTy = std::map<int64_t, int64_t>;
171 using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
173 /// Returns true if the end of this instruction can be safely shortened in
174 /// length.
175 static bool isShortenableAtTheEnd(Instruction *I) {
176 // Don't shorten stores for now
177 if (isa<StoreInst>(I))
178 return false;
180 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
181 switch (II->getIntrinsicID()) {
182 default: return false;
183 case Intrinsic::memset:
184 case Intrinsic::memcpy:
185 case Intrinsic::memcpy_element_unordered_atomic:
186 case Intrinsic::memset_element_unordered_atomic:
187 // Do shorten memory intrinsics.
188 // FIXME: Add memmove if it's also safe to transform.
189 return true;
193 // Don't shorten libcalls for now.
195 return false;
198 /// Returns true if the beginning of this instruction can be safely shortened
199 /// in length.
200 static bool isShortenableAtTheBeginning(Instruction *I) {
201 // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
202 // easily done by offsetting the source address.
203 return isa<AnyMemSetInst>(I);
206 static std::optional<TypeSize> getPointerSize(const Value *V,
207 const DataLayout &DL,
208 const TargetLibraryInfo &TLI,
209 const Function *F) {
210 uint64_t Size;
211 ObjectSizeOpts Opts;
212 Opts.NullIsUnknownSize = NullPointerIsDefined(F);
214 if (getObjectSize(V, Size, DL, &TLI, Opts))
215 return TypeSize::getFixed(Size);
216 return std::nullopt;
219 namespace {
221 enum OverwriteResult {
222 OW_Begin,
223 OW_Complete,
224 OW_End,
225 OW_PartialEarlierWithFullLater,
226 OW_MaybePartial,
227 OW_None,
228 OW_Unknown
231 } // end anonymous namespace
233 /// Check if two instructions are masked stores that completely
234 /// overwrite one another. More specifically, \p KillingI has to
235 /// overwrite \p DeadI.
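/// For illustration (a hypothetical pair of masked stores, not from a test):
///   call void @llvm.masked.store.v4i32.p0(<4 x i32> %a, ptr %p, i32 4, <4 x i1> %m)
///   call void @llvm.masked.store.v4i32.p0(<4 x i32> %b, ptr %p, i32 4, <4 x i1> %m)
/// Both stores use the same pointer, element type, element count and mask, so
/// the second store completely overwrites the first (OW_Complete).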
236 static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
237 const Instruction *DeadI,
238 BatchAAResults &AA) {
239 const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
240 const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
241 if (KillingII == nullptr || DeadII == nullptr)
242 return OW_Unknown;
243 if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
244 return OW_Unknown;
245 if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
246 // Type size.
247 VectorType *KillingTy =
248 cast<VectorType>(KillingII->getArgOperand(0)->getType());
249 VectorType *DeadTy = cast<VectorType>(DeadII->getArgOperand(0)->getType());
250 if (KillingTy->getScalarSizeInBits() != DeadTy->getScalarSizeInBits())
251 return OW_Unknown;
252 // Element count.
253 if (KillingTy->getElementCount() != DeadTy->getElementCount())
254 return OW_Unknown;
255 // Pointers.
256 Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts();
257 Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts();
258 if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
259 return OW_Unknown;
260 // Masks.
261 // TODO: check that KillingII's mask is a superset of the DeadII's mask.
262 if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
263 return OW_Unknown;
264 return OW_Complete;
266 return OW_Unknown;
269 /// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
270 /// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the
271 /// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin'
272 /// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'.
273 /// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was
274 /// overwritten by a killing (smaller) store which doesn't write outside the big
275 /// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
276 /// NOTE: This function must only be called if both \p KillingLoc and \p
277 /// DeadLoc belong to the same underlying object with valid \p KillingOff and
278 /// \p DeadOff.
279 static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
280 const MemoryLocation &DeadLoc,
281 int64_t KillingOff, int64_t DeadOff,
282 Instruction *DeadI,
283 InstOverlapIntervalsTy &IOL) {
284 const uint64_t KillingSize = KillingLoc.Size.getValue();
285 const uint64_t DeadSize = DeadLoc.Size.getValue();
286 // We may now overlap, although the overlap is not complete. There might also
287 // be other incomplete overlaps, and together, they might cover the complete
288 // dead store.
289 // Note: The correctness of this logic depends on the fact that this function
290 // is never called with DeadI when there are any intervening reads.
291 if (EnablePartialOverwriteTracking &&
292 KillingOff < int64_t(DeadOff + DeadSize) &&
293 int64_t(KillingOff + KillingSize) >= DeadOff) {
295 // Insert our part of the overlap into the map.
296 auto &IM = IOL[DeadI];
297 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
298 << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
299 << KillingOff << ", " << int64_t(KillingOff + KillingSize)
300 << ")\n");
302 // Make sure that we only insert non-overlapping intervals and combine
303 // adjacent intervals. The intervals are stored in the map with the ending
304 // offset as the key (in the half-open sense) and the starting offset as
305 // the value.
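// For example (hypothetical offsets): with killing intervals [0, 4) and
// [8, 12) already recorded, the map holds {4 -> 0, 12 -> 8}. Merging a new
// killing interval [2, 10) erases both entries and records {12 -> 0},
// i.e. the composite interval [0, 12).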
306 int64_t KillingIntStart = KillingOff;
307 int64_t KillingIntEnd = KillingOff + KillingSize;
309 // Find any intervals ending at, or after, KillingIntStart which start
310 // before KillingIntEnd.
311 auto ILI = IM.lower_bound(KillingIntStart);
312 if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
313 // This existing interval is overlapped with the current store somewhere
314 // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
315 // intervals and adjusting our start and end.
316 KillingIntStart = std::min(KillingIntStart, ILI->second);
317 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
318 ILI = IM.erase(ILI);
320 // Continue erasing and adjusting our end in case other previous
321 // intervals are also overlapped with the current store.
323 // |--- dead 1 ---| |--- dead 2 ---|
324 // |------- killing---------|
326 while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
327 assert(ILI->second > KillingIntStart && "Unexpected interval");
328 KillingIntEnd = std::max(KillingIntEnd, ILI->first);
329 ILI = IM.erase(ILI);
333 IM[KillingIntEnd] = KillingIntStart;
335 ILI = IM.begin();
336 if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
337 LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
338 << DeadOff << ", " << int64_t(DeadOff + DeadSize)
339 << ") Composite KillingLoc [" << ILI->second << ", "
340 << ILI->first << ")\n");
341 ++NumCompletePartials;
342 return OW_Complete;
346 // Check for a dead store which writes to all the memory locations that
347 // the killing store writes to.
348 if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
349 int64_t(DeadOff + DeadSize) > KillingOff &&
350 uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
351 LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead store [" << DeadOff
352 << ", " << int64_t(DeadOff + DeadSize)
353 << ") by a killing store [" << KillingOff << ", "
354 << int64_t(KillingOff + KillingSize) << ")\n");
355 // TODO: Maybe come up with a better name?
356 return OW_PartialEarlierWithFullLater;
359 // Another interesting case is if the killing store overwrites the end of the
360 // dead store.
362 // |--dead--|
363 // |-- killing --|
365 // In this case we may want to trim the size of dead store to avoid
366 // generating stores to addresses which will definitely be overwritten by the
367 // killing store.
368 if (!EnablePartialOverwriteTracking &&
369 (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
370 int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
371 return OW_End;
373 // Finally, we also need to check if the killing store overwrites the
374 // beginning of the dead store.
376 // |--dead--|
377 // |-- killing --|
379 // In this case we may want to move the destination address and trim the size
380 // of dead store to avoid generating stores to addresses which will definitely
381 // be overwritten by the killing store.
382 if (!EnablePartialOverwriteTracking &&
383 (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
384 assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
385 "Expect to be handled as OW_Complete");
386 return OW_Begin;
388 // Otherwise, they don't completely overlap.
389 return OW_Unknown;
392 /// Returns true if the memory which is accessed by the second instruction is not
393 /// modified between the first and the second instruction.
394 /// Precondition: Second instruction must be dominated by the first
395 /// instruction.
396 static bool
397 memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
398 BatchAAResults &AA, const DataLayout &DL,
399 DominatorTree *DT) {
400 // Do a backwards scan through the CFG from SecondI to FirstI. Look for
401 // instructions which can modify the memory location accessed by SecondI.
403 // While doing the walk keep track of the address to check. It might be
404 // different in different basic blocks due to PHI translation.
405 using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
406 SmallVector<BlockAddressPair, 16> WorkList;
407 // Keep track of the address we visited each block with. Bail out if we
408 // visit a block with different addresses.
409 DenseMap<BasicBlock *, Value *> Visited;
411 BasicBlock::iterator FirstBBI(FirstI);
412 ++FirstBBI;
413 BasicBlock::iterator SecondBBI(SecondI);
414 BasicBlock *FirstBB = FirstI->getParent();
415 BasicBlock *SecondBB = SecondI->getParent();
416 MemoryLocation MemLoc;
417 if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
418 MemLoc = MemoryLocation::getForDest(MemSet);
419 else
420 MemLoc = MemoryLocation::get(SecondI);
422 auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);
424 // Start checking the SecondBB.
425 WorkList.push_back(
426 std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
427 bool isFirstBlock = true;
429 // Check all blocks going backward until we reach the FirstBB.
430 while (!WorkList.empty()) {
431 BlockAddressPair Current = WorkList.pop_back_val();
432 BasicBlock *B = Current.first;
433 PHITransAddr &Addr = Current.second;
434 Value *Ptr = Addr.getAddr();
436 // Ignore instructions before FirstI if this is the FirstBB.
437 BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());
439 BasicBlock::iterator EI;
440 if (isFirstBlock) {
441 // Ignore instructions after SecondI if this is the first visit of SecondBB.
442 assert(B == SecondBB && "first block is not the store block");
443 EI = SecondBBI;
444 isFirstBlock = false;
445 } else {
446 // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
447 // In this case we also have to look at instructions after SecondI.
448 EI = B->end();
450 for (; BI != EI; ++BI) {
451 Instruction *I = &*BI;
452 if (I->mayWriteToMemory() && I != SecondI)
453 if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
454 return false;
456 if (B != FirstBB) {
457 assert(B != &FirstBB->getParent()->getEntryBlock() &&
458 "Should not hit the entry block because SI must be dominated by LI");
459 for (BasicBlock *Pred : predecessors(B)) {
460 PHITransAddr PredAddr = Addr;
461 if (PredAddr.needsPHITranslationFromBlock(B)) {
462 if (!PredAddr.isPotentiallyPHITranslatable())
463 return false;
464 if (!PredAddr.translateValue(B, Pred, DT, false))
465 return false;
467 Value *TranslatedPtr = PredAddr.getAddr();
468 auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
469 if (!Inserted.second) {
470 // We already visited this block before. If it was with a different
471 // address - bail out!
472 if (TranslatedPtr != Inserted.first->second)
473 return false;
474 // ... otherwise just skip it.
475 continue;
477 WorkList.push_back(std::make_pair(Pred, PredAddr));
481 return true;
484 static void shortenAssignment(Instruction *Inst, Value *OriginalDest,
485 uint64_t OldOffsetInBits, uint64_t OldSizeInBits,
486 uint64_t NewSizeInBits, bool IsOverwriteEnd) {
487 const DataLayout &DL = Inst->getModule()->getDataLayout();
488 uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
489 uint64_t DeadSliceOffsetInBits =
490 OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
491 auto SetDeadFragExpr = [](auto *Assign,
492 DIExpression::FragmentInfo DeadFragment) {
493 // createFragmentExpression expects an offset relative to the existing
494 // fragment offset if there is one.
495 uint64_t RelativeOffset = DeadFragment.OffsetInBits -
496 Assign->getExpression()
497 ->getFragmentInfo()
498 .value_or(DIExpression::FragmentInfo(0, 0))
499 .OffsetInBits;
500 if (auto NewExpr = DIExpression::createFragmentExpression(
501 Assign->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
502 Assign->setExpression(*NewExpr);
503 return;
505 // Failed to create a fragment expression for this so discard the value,
506 // making this a kill location.
507 auto *Expr = *DIExpression::createFragmentExpression(
508 DIExpression::get(Assign->getContext(), std::nullopt),
509 DeadFragment.OffsetInBits, DeadFragment.SizeInBits);
510 Assign->setExpression(Expr);
511 Assign->setKillLocation();
514 // A DIAssignID to use so that the inserted dbg.assign intrinsics do not
515 // link to any instructions. Created in the loop below (once).
516 DIAssignID *LinkToNothing = nullptr;
517 LLVMContext &Ctx = Inst->getContext();
518 auto GetDeadLink = [&Ctx, &LinkToNothing]() {
519 if (!LinkToNothing)
520 LinkToNothing = DIAssignID::getDistinct(Ctx);
521 return LinkToNothing;
524 // Insert an unlinked dbg.assign intrinsic for the dead fragment after each
525 // overlapping dbg.assign intrinsic. The loop invalidates the iterators
526 // returned by getAssignmentMarkers so save a copy of the markers to iterate
527 // over.
528 auto LinkedRange = at::getAssignmentMarkers(Inst);
529 SmallVector<DPValue *> LinkedDPVAssigns = at::getDPVAssignmentMarkers(Inst);
530 SmallVector<DbgAssignIntrinsic *> Linked(LinkedRange.begin(),
531 LinkedRange.end());
532 auto InsertAssignForOverlap = [&](auto *Assign) {
533 std::optional<DIExpression::FragmentInfo> NewFragment;
534 if (!at::calculateFragmentIntersect(DL, OriginalDest, DeadSliceOffsetInBits,
535 DeadSliceSizeInBits, Assign,
536 NewFragment) ||
537 !NewFragment) {
538 // We couldn't calculate the intersecting fragment for some reason. Be
539 // cautious and unlink the whole assignment from the store.
540 Assign->setKillAddress();
541 Assign->setAssignId(GetDeadLink());
542 return;
544 // No intersect.
545 if (NewFragment->SizeInBits == 0)
546 return;
548 // Fragments overlap: insert a new dbg.assign for this dead part.
549 auto *NewAssign = static_cast<decltype(Assign)>(Assign->clone());
550 NewAssign->insertAfter(Assign);
551 NewAssign->setAssignId(GetDeadLink());
552 if (NewFragment)
553 SetDeadFragExpr(NewAssign, *NewFragment);
554 NewAssign->setKillAddress();
556 for_each(Linked, InsertAssignForOverlap);
557 for_each(LinkedDPVAssigns, InsertAssignForOverlap);
560 static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
561 uint64_t &DeadSize, int64_t KillingStart,
562 uint64_t KillingSize, bool IsOverwriteEnd) {
563 auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
564 Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();
566 // We assume that memset/memcpy operates in chunks of the "largest" native
567 // type size and is aligned on the same value. That means the optimal start
568 // and size of memset/memcpy should be modulo the preferred alignment of that
569 // type, i.e. there is no sense in trying to reduce the store size any
570 // further since any "extra" stores come for free anyway.
571 // On the other hand, the maximum alignment we can achieve is limited by the
572 // alignment of the initial store.
574 // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
575 // "largest" native type.
576 // Note: What is the proper way to get that value?
577 // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
578 // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
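// For example (hypothetical values): trimming the end of a 32-byte memset at
// DeadStart = 0 with PrefAlign = 8 whose bytes [20, 32) are overwritten keeps
// ToRemoveStart at 24 rather than 20, so the remaining 24-byte store stays a
// multiple of the preferred alignment and only [24, 32) is removed.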
580 int64_t ToRemoveStart = 0;
581 uint64_t ToRemoveSize = 0;
582 // Compute start and size of the region to remove. Make sure 'PrefAlign' is
583 // maintained on the remaining store.
584 if (IsOverwriteEnd) {
585 // Calculate required adjustment for 'KillingStart' in order to keep
586 // remaining store size aligned on 'PrefAlign'.
587 uint64_t Off =
588 offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
589 ToRemoveStart = KillingStart + Off;
590 if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
591 return false;
592 ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
593 } else {
594 ToRemoveStart = DeadStart;
595 assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
596 "Not overlapping accesses?");
597 ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
598 // Calculate required adjustment for 'ToRemoveSize' in order to keep the
599 // start of the remaining store aligned on 'PrefAlign'.
600 uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
601 if (Off != 0) {
602 if (ToRemoveSize <= (PrefAlign.value() - Off))
603 return false;
604 ToRemoveSize -= PrefAlign.value() - Off;
606 assert(isAligned(PrefAlign, ToRemoveSize) &&
607 "Should preserve selected alignment");
610 assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
611 assert(DeadSize > ToRemoveSize && "Can't remove more than original size");
613 uint64_t NewSize = DeadSize - ToRemoveSize;
614 if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
615 // When shortening an atomic memory intrinsic, the newly shortened
616 // length must remain an integer multiple of the element size.
617 const uint32_t ElementSize = AMI->getElementSizeInBytes();
618 if (0 != NewSize % ElementSize)
619 return false;
622 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
623 << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
624 << "\n KILLER [" << ToRemoveStart << ", "
625 << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");
627 Value *DeadWriteLength = DeadIntrinsic->getLength();
628 Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
629 DeadIntrinsic->setLength(TrimmedLength);
630 DeadIntrinsic->setDestAlignment(PrefAlign);
632 Value *OrigDest = DeadIntrinsic->getRawDest();
633 if (!IsOverwriteEnd) {
634 Value *Indices[1] = {
635 ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
636 Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
637 Type::getInt8Ty(DeadIntrinsic->getContext()), OrigDest, Indices, "", DeadI);
638 NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
639 DeadIntrinsic->setDest(NewDestGEP);
642 // Update attached dbg.assign intrinsics. Assume 8-bit byte.
643 shortenAssignment(DeadI, OrigDest, DeadStart * 8, DeadSize * 8, NewSize * 8,
644 IsOverwriteEnd);
646 // Finally update start and size of dead access.
647 if (!IsOverwriteEnd)
648 DeadStart += ToRemoveSize;
649 DeadSize = NewSize;
651 return true;
654 static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
655 int64_t &DeadStart, uint64_t &DeadSize) {
656 if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
657 return false;
659 OverlapIntervalsTy::iterator OII = --IntervalMap.end();
660 int64_t KillingStart = OII->second;
661 uint64_t KillingSize = OII->first - KillingStart;
663 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
665 if (KillingStart > DeadStart &&
666 // Note: "KillingStart - KillingStart" is known to be positive due to
667 // preceding check.
668 (uint64_t)(KillingStart - DeadStart) < DeadSize &&
669 // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
670 // be non-negative due to preceding checks.
671 KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
672 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
673 true)) {
674 IntervalMap.erase(OII);
675 return true;
678 return false;
681 static bool tryToShortenBegin(Instruction *DeadI,
682 OverlapIntervalsTy &IntervalMap,
683 int64_t &DeadStart, uint64_t &DeadSize) {
684 if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
685 return false;
687 OverlapIntervalsTy::iterator OII = IntervalMap.begin();
688 int64_t KillingStart = OII->second;
689 uint64_t KillingSize = OII->first - KillingStart;
691 assert(OII->first - KillingStart >= 0 && "Size expected to be positive");
693 if (KillingStart <= DeadStart &&
694 // Note: "DeadStart - KillingStart" is known to be non negative due to
695 // preceding check.
696 KillingSize > (uint64_t)(DeadStart - KillingStart)) {
697 // Note: "KillingSize - (uint64_t)(DeadStart - DeadStart)" is known to
698 // be positive due to preceding checks.
699 assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
700 "Should have been handled as OW_Complete");
701 if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
702 false)) {
703 IntervalMap.erase(OII);
704 return true;
707 return false;
710 static Constant *
711 tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
712 int64_t KillingOffset, int64_t DeadOffset,
713 const DataLayout &DL, BatchAAResults &AA,
714 DominatorTree *DT) {
716 if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
717 DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
718 KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
719 DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
720 memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
721 // If the store we find is:
722 // a) partially overwritten by the store to 'Loc'
723 // b) the killing store is fully contained in the dead one and
724 // c) they both have a constant value
725 // d) none of the two stores need padding
726 // Merge the two stores, replacing the dead store's value with a
727 // merge of both values.
728 // TODO: Deal with other constant types (vectors, etc), and probably
729 // some mem intrinsics (if needed)
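// For illustration (hypothetical constants, little-endian): if the dead store
// is `store i32 0x11223344, ptr %p` and the killing store writes i8 0xAB at
// offset 2 into the same object, then LShiftAmount = 16, the mask covers bits
// [16, 24), and the merged value is 0x11AB3344, which replaces the dead
// store's operand so the killing store can be removed.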
731 APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
732 APInt KillingValue =
733 cast<ConstantInt>(KillingI->getValueOperand())->getValue();
734 unsigned KillingBits = KillingValue.getBitWidth();
735 assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
736 KillingValue = KillingValue.zext(DeadValue.getBitWidth());
738 // Offset of the smaller store inside the larger store
739 unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
740 unsigned LShiftAmount =
741 DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
742 : BitOffsetDiff;
743 APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
744 LShiftAmount + KillingBits);
745 // Clear the bits we'll be replacing, then OR with the smaller
746 // store, shifted appropriately.
747 APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
748 LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Dead: " << *DeadI
749 << "\n Killing: " << *KillingI
750 << "\n Merged Value: " << Merged << '\n');
751 return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
753 return nullptr;
756 namespace {
757 // Returns true if \p I is an intrinsic that does not read or write memory.
758 bool isNoopIntrinsic(Instruction *I) {
759 if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
760 switch (II->getIntrinsicID()) {
761 case Intrinsic::lifetime_start:
762 case Intrinsic::lifetime_end:
763 case Intrinsic::invariant_end:
764 case Intrinsic::launder_invariant_group:
765 case Intrinsic::assume:
766 return true;
767 case Intrinsic::dbg_declare:
768 case Intrinsic::dbg_label:
769 case Intrinsic::dbg_value:
770 llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
771 default:
772 return false;
775 return false;
778 // Check if we can ignore \p D for DSE.
779 bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
780 Instruction *DI = D->getMemoryInst();
781 // Calls that only access inaccessible memory cannot read or write any memory
782 // locations we consider for elimination.
783 if (auto *CB = dyn_cast<CallBase>(DI))
784 if (CB->onlyAccessesInaccessibleMemory())
785 return true;
787 // We can eliminate stores to locations not visible to the caller across
788 // throwing instructions.
789 if (DI->mayThrow() && !DefVisibleToCaller)
790 return true;
792 // We can remove the dead stores, irrespective of the fence and its ordering
793 // (release/acquire/seq_cst). Fences only constrain the ordering of
794 // already visible stores; they do not make a store visible to other
795 // threads. So, skipping over a fence does not change a store from being
796 // dead.
797 if (isa<FenceInst>(DI))
798 return true;
800 // Skip intrinsics that do not really read or modify memory.
801 if (isNoopIntrinsic(DI))
802 return true;
804 return false;
807 struct DSEState {
808 Function &F;
809 AliasAnalysis &AA;
810 EarliestEscapeInfo EI;
812 /// The single BatchAA instance that is used to cache AA queries. It will
813 /// not be invalidated over the whole run. This is safe, because:
814 /// 1. Only memory writes are removed, so the alias cache for memory
815 /// locations remains valid.
816 /// 2. No new instructions are added (only instructions removed), so cached
817 /// information for a deleted value cannot be accessed by a re-used new
818 /// value pointer.
819 BatchAAResults BatchAA;
821 MemorySSA &MSSA;
822 DominatorTree &DT;
823 PostDominatorTree &PDT;
824 const TargetLibraryInfo &TLI;
825 const DataLayout &DL;
826 const LoopInfo &LI;
828 // Whether the function contains any irreducible control flow, useful for
829 // being able to accurately detect loops.
830 bool ContainsIrreducibleLoops;
832 // All MemoryDefs that potentially could kill other MemDefs.
833 SmallVector<MemoryDef *, 64> MemDefs;
834 // Any that should be skipped as they are already deleted
835 SmallPtrSet<MemoryAccess *, 4> SkipStores;
837 // Keep track of whether a given object is captured before return or not.
837 DenseMap<const Value *, bool> CapturedBeforeReturn;
838 // Keep track of all of the objects that are invisible to the caller after
839 // the function returns.
840 DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
841 // Keep track of blocks with throwing instructions not modeled in MemorySSA.
842 SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
843 // Post-order numbers for each basic block. Used to figure out if memory
844 // accesses are executed before another access.
845 DenseMap<BasicBlock *, unsigned> PostOrderNumbers;
847 /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
848 /// basic block.
849 MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
850 // Check if there are root nodes that are terminated by UnreachableInst.
851 // Those roots pessimize post-dominance queries. If there are such roots,
852 // fall back to CFG scan starting from all non-unreachable roots.
853 bool AnyUnreachableExit;
855 // Whether or not we should iterate on removing dead stores at the end of the
856 // function due to removing a store causing a previously captured pointer to
857 // no longer be captured.
858 bool ShouldIterateEndOfFunctionDSE;
860 /// Dead instructions to be removed at the end of DSE.
861 SmallVector<Instruction *> ToRemove;
863 // Class contains self-reference, make sure it's not copied/moved.
864 DSEState(const DSEState &) = delete;
865 DSEState &operator=(const DSEState &) = delete;
867 DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
868 PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
869 const LoopInfo &LI)
870 : F(F), AA(AA), EI(DT, &LI), BatchAA(AA, &EI), MSSA(MSSA), DT(DT),
871 PDT(PDT), TLI(TLI), DL(F.getParent()->getDataLayout()), LI(LI) {
872 // Collect blocks with throwing instructions not modeled in MemorySSA and
873 // alloc-like objects.
874 unsigned PO = 0;
875 for (BasicBlock *BB : post_order(&F)) {
876 PostOrderNumbers[BB] = PO++;
877 for (Instruction &I : *BB) {
878 MemoryAccess *MA = MSSA.getMemoryAccess(&I);
879 if (I.mayThrow() && !MA)
880 ThrowingBlocks.insert(I.getParent());
882 auto *MD = dyn_cast_or_null<MemoryDef>(MA);
883 if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
884 (getLocForWrite(&I) || isMemTerminatorInst(&I)))
885 MemDefs.push_back(MD);
889 // Treat byval or inalloca arguments the same as Allocas; stores to them are
890 // dead at the end of the function.
891 for (Argument &AI : F.args())
892 if (AI.hasPassPointeeByValueCopyAttr())
893 InvisibleToCallerAfterRet.insert({&AI, true});
895 // Collect whether there is any irreducible control flow in the function.
896 ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);
898 AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
899 return isa<UnreachableInst>(E->getTerminator());
903 LocationSize strengthenLocationSize(const Instruction *I,
904 LocationSize Size) const {
905 if (auto *CB = dyn_cast<CallBase>(I)) {
906 LibFunc F;
907 if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
908 (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
909 // Use the precise location size specified by the 3rd argument
910 // for determining KillingI overwrites DeadLoc if it is a memset_chk
911 // instruction. memset_chk will write either the amount specified as 3rd
912 // argument or the function will immediately abort and exit the program.
913 // NOTE: AA may determine NoAlias if it can prove that the access size
914 // is larger than the allocation size due to that being UB. To avoid
915 // returning potentially invalid NoAlias results by AA, limit the use of
916 // the precise location size to isOverwrite.
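// For example (a hypothetical call): for `__memset_chk(p, 0, 16, n)`, either
// exactly 16 bytes are written or the program aborts before the store, so the
// constant third argument can be used as a precise size here.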
917 if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
918 return LocationSize::precise(Len->getZExtValue());
921 return Size;
924 /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
925 /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
926 /// location (by \p DeadI instruction).
927 /// Return OW_MaybePartial if \p KillingI does not completely overwrite
928 /// \p DeadI, but they both write to the same underlying object. In that
929 /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
930 /// \p DeadI. Returns 'OW_None' if \p KillingI is known to not overwrite the
931 /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
932 OverwriteResult isOverwrite(const Instruction *KillingI,
933 const Instruction *DeadI,
934 const MemoryLocation &KillingLoc,
935 const MemoryLocation &DeadLoc,
936 int64_t &KillingOff, int64_t &DeadOff) {
937 // AliasAnalysis does not always account for loops. Limit overwrite checks
938 // to dependencies for which we can guarantee they are independent of any
939 // loops they are in.
940 if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
941 return OW_Unknown;
943 LocationSize KillingLocSize =
944 strengthenLocationSize(KillingI, KillingLoc.Size);
945 const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
946 const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
947 const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
948 const Value *KillingUndObj = getUnderlyingObject(KillingPtr);
950 // Check whether the killing store overwrites the whole object, in which
951 // case the size/offset of the dead store does not matter.
952 if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
953 isIdentifiedObject(KillingUndObj)) {
954 std::optional<TypeSize> KillingUndObjSize =
955 getPointerSize(KillingUndObj, DL, TLI, &F);
956 if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.getValue())
957 return OW_Complete;
960 // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
961 // get imprecise values here, though (except for unknown sizes).
962 if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
963 // In case no constant size is known, try to use the IR values for the number
964 // of bytes written and check if they match.
965 const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
966 const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
967 if (KillingMemI && DeadMemI) {
968 const Value *KillingV = KillingMemI->getLength();
969 const Value *DeadV = DeadMemI->getLength();
970 if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
971 return OW_Complete;
974 // Masked stores have imprecise locations, but we can reason about them
975 // to some extent.
976 return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
979 const TypeSize KillingSize = KillingLocSize.getValue();
980 const TypeSize DeadSize = DeadLoc.Size.getValue();
981 // Bail out on doing the size comparison, which depends on AA, for now.
982 // TODO: Remove AnyScalable once Alias Analysis deals with scalable vectors.
983 const bool AnyScalable =
984 DeadSize.isScalable() || KillingLocSize.isScalable();
986 if (AnyScalable)
987 return OW_Unknown;
988 // Query the alias information
989 AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);
991 // If the start pointers are the same, we just have to compare sizes to see if
992 // the killing store was larger than the dead store.
993 if (AAR == AliasResult::MustAlias) {
994 // Make sure that the KillingSize is >= the DeadSize.
995 if (KillingSize >= DeadSize)
996 return OW_Complete;
999 // If we hit a partial alias we may have a full overwrite
1000 if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
1001 int32_t Off = AAR.getOffset();
1002 if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
1003 return OW_Complete;
1006 // If we can't resolve the same pointers to the same object, then we can't
1007 // analyze them at all.
1008 if (DeadUndObj != KillingUndObj) {
1009 // Non aliasing stores to different objects don't overlap. Note that
1010 // if the killing store is known to overwrite whole object (out of
1011 // bounds access overwrites whole object as well) then it is assumed to
1012 // completely overwrite any store to the same object even if they don't
1013 // actually alias (see next check).
1014 if (AAR == AliasResult::NoAlias)
1015 return OW_None;
1016 return OW_Unknown;
1019 // Okay, we have stores to two completely different pointers. Try to
1020 // decompose the pointer into a "base + constant_offset" form. If the base
1021 // pointers are equal, then we can reason about the two stores.
1022 DeadOff = 0;
1023 KillingOff = 0;
1024 const Value *DeadBasePtr =
1025 GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
1026 const Value *KillingBasePtr =
1027 GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);
1029 // If the base pointers still differ, we have two completely different
1030 // stores.
1031 if (DeadBasePtr != KillingBasePtr)
1032 return OW_Unknown;
1034 // The killing access completely overlaps the dead store if and only if
1035 // both start and end of the dead one is "inside" the killing one:
1036 // |<->|--dead--|<->|
1037 // |-----killing------|
1038 // Accesses may overlap if and only if start of one of them is "inside"
1039 // another one:
1040 // |<->|--dead--|<-------->|
1041 // |-------killing--------|
1042 // OR
1043 // |-------dead-------|
1044 // |<->|---killing---|<----->|
1046 // We have to be careful here as *Off is signed while *.Size is unsigned.
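// For example (hypothetical offsets): with DeadOff = 8, DeadSize = 4,
// KillingOff = 0 and KillingSize = 16, the dead access lies entirely inside
// the killing one (OW_Complete); with DeadSize = 16 instead, only the first
// part of the dead access is covered, so the result is OW_MaybePartial.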
1048 // Check if the dead access starts "not before" the killing one.
1049 if (DeadOff >= KillingOff) {
1050 // If the dead access ends "not after" the killing access then the
1051 // dead one is completely overwritten by the killing one.
1052 if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
1053 return OW_Complete;
1054 // If start of the dead access is "before" end of the killing access
1055 // then accesses overlap.
1056 else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
1057 return OW_MaybePartial;
1059 // If start of the killing access is "before" end of the dead access then
1060 // accesses overlap.
1061 else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
1062 return OW_MaybePartial;
1065 // Can reach here only if accesses are known not to overlap.
1066 return OW_None;
1069 bool isInvisibleToCallerAfterRet(const Value *V) {
1070 if (isa<AllocaInst>(V))
1071 return true;
1072 auto I = InvisibleToCallerAfterRet.insert({V, false});
1073 if (I.second) {
1074 if (!isInvisibleToCallerOnUnwind(V)) {
1075 I.first->second = false;
1076 } else if (isNoAliasCall(V)) {
1077 I.first->second = !PointerMayBeCaptured(V, true, false);
1080 return I.first->second;
1083 bool isInvisibleToCallerOnUnwind(const Value *V) {
1084 bool RequiresNoCaptureBeforeUnwind;
1085 if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
1086 return false;
1087 if (!RequiresNoCaptureBeforeUnwind)
1088 return true;
1090 auto I = CapturedBeforeReturn.insert({V, true});
1091 if (I.second)
1092 // NOTE: This could be made more precise by PointerMayBeCapturedBefore
1093 // with the killing MemoryDef. But we refrain from doing so for now to
1094 // limit compile-time and this does not cause any changes to the number
1095 // of stores removed on a large test set in practice.
1096 I.first->second = PointerMayBeCaptured(V, false, true);
1097 return !I.first->second;
1100 std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
1101 if (!I->mayWriteToMemory())
1102 return std::nullopt;
1104 if (auto *CB = dyn_cast<CallBase>(I))
1105 return MemoryLocation::getForDest(CB, TLI);
1107 return MemoryLocation::getOrNone(I);
1110 /// Assuming this instruction has a dead analyzable write, can we delete
1111 /// this instruction?
1112 bool isRemovable(Instruction *I) {
1113 assert(getLocForWrite(I) && "Must have analyzable write");
1115 // Don't remove volatile/atomic stores.
1116 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1117 return SI->isUnordered();
1119 if (auto *CB = dyn_cast<CallBase>(I)) {
1120 // Don't remove volatile memory intrinsics.
1121 if (auto *MI = dyn_cast<MemIntrinsic>(CB))
1122 return !MI->isVolatile();
1124 // Never remove dead lifetime intrinsics, e.g. because they are followed
1125 // by a free.
1126 if (CB->isLifetimeStartOrEnd())
1127 return false;
1129 return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
1130 !CB->isTerminator();
1133 return false;
1136 /// Returns true if \p UseInst completely overwrites \p DefLoc
1137 /// (stored by \p DefInst).
1138 bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
1139 Instruction *UseInst) {
1140 // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
1141 // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
1142 // MemoryDef.
1143 if (!UseInst->mayWriteToMemory())
1144 return false;
1146 if (auto *CB = dyn_cast<CallBase>(UseInst))
1147 if (CB->onlyAccessesInaccessibleMemory())
1148 return false;
1150 int64_t InstWriteOffset, DepWriteOffset;
1151 if (auto CC = getLocForWrite(UseInst))
1152 return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
1153 DepWriteOffset) == OW_Complete;
1154 return false;
1157 /// Returns true if \p Def is not read before returning from the function.
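/// For illustration (hypothetical IR): in
///   %a = alloca i32
///   store i32 1, ptr %a
///   ret void
/// nothing reads %a after the store, so the store is dead at function exit.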
1158 bool isWriteAtEndOfFunction(MemoryDef *Def) {
1159 LLVM_DEBUG(dbgs() << " Check if def " << *Def << " ("
1160 << *Def->getMemoryInst()
1161 << ") is at the end the function \n");
1163 auto MaybeLoc = getLocForWrite(Def->getMemoryInst());
1164 if (!MaybeLoc) {
1165 LLVM_DEBUG(dbgs() << " ... could not get location for write.\n");
1166 return false;
1169 SmallVector<MemoryAccess *, 4> WorkList;
1170 SmallPtrSet<MemoryAccess *, 8> Visited;
1171 auto PushMemUses = [&WorkList, &Visited](MemoryAccess *Acc) {
1172 if (!Visited.insert(Acc).second)
1173 return;
1174 for (Use &U : Acc->uses())
1175 WorkList.push_back(cast<MemoryAccess>(U.getUser()));
1177 PushMemUses(Def);
1178 for (unsigned I = 0; I < WorkList.size(); I++) {
1179 if (WorkList.size() >= MemorySSAScanLimit) {
1180 LLVM_DEBUG(dbgs() << " ... hit exploration limit.\n");
1181 return false;
1184 MemoryAccess *UseAccess = WorkList[I];
1185 if (isa<MemoryPhi>(UseAccess)) {
1186 // AliasAnalysis does not account for loops. Limit elimination to
1187 // candidates for which we can guarantee they always store to the same
1188 // memory location.
1189 if (!isGuaranteedLoopInvariant(MaybeLoc->Ptr))
1190 return false;
1192 PushMemUses(cast<MemoryPhi>(UseAccess));
1193 continue;
1195 // TODO: Checking for aliasing is expensive. Consider reducing the amount
1196 // of times this is called and/or caching it.
1197 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1198 if (isReadClobber(*MaybeLoc, UseInst)) {
1199 LLVM_DEBUG(dbgs() << " ... hit read clobber " << *UseInst << ".\n");
1200 return false;
1203 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
1204 PushMemUses(UseDef);
1206 return true;
1209 /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
1210 /// pair with the MemoryLocation terminated by \p I and a boolean flag
1211 /// indicating whether \p I is a free-like call.
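/// For example, `call void @llvm.lifetime.end.p0(i64 16, ptr %a)` yields the
/// 16-byte location at %a with the flag set to false, while a call to
/// `free(ptr %p)` yields the location after %p with the flag set to true.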
1212 std::optional<std::pair<MemoryLocation, bool>>
1213 getLocForTerminator(Instruction *I) const {
1214 uint64_t Len;
1215 Value *Ptr;
1216 if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
1217 m_Value(Ptr))))
1218 return {std::make_pair(MemoryLocation(Ptr, Len), false)};
1220 if (auto *CB = dyn_cast<CallBase>(I)) {
1221 if (Value *FreedOp = getFreedOperand(CB, &TLI))
1222 return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
1225 return std::nullopt;
1228 /// Returns true if \p I is a memory terminator instruction like
1229 /// llvm.lifetime.end or free.
1230 bool isMemTerminatorInst(Instruction *I) const {
1231 auto *CB = dyn_cast<CallBase>(I);
1232 return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
1233 getFreedOperand(CB, &TLI) != nullptr);
1236 /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
1237 /// instruction \p AccessI.
1238 bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
1239 Instruction *MaybeTerm) {
1240 std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
1241 getLocForTerminator(MaybeTerm);
1243 if (!MaybeTermLoc)
1244 return false;
1246 // If the terminator is a free-like call, all accesses to the underlying
1247 // object can be considered terminated.
1248 if (getUnderlyingObject(Loc.Ptr) !=
1249 getUnderlyingObject(MaybeTermLoc->first.Ptr))
1250 return false;
1252 auto TermLoc = MaybeTermLoc->first;
1253 if (MaybeTermLoc->second) {
1254 const Value *LocUO = getUnderlyingObject(Loc.Ptr);
1255 return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
1257 int64_t InstWriteOffset = 0;
1258 int64_t DepWriteOffset = 0;
1259 return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
1260 DepWriteOffset) == OW_Complete;
1263 // Returns true if \p Use may read from \p DefLoc.
1264 bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
1265 if (isNoopIntrinsic(UseInst))
1266 return false;
1268 // Monotonic or weaker atomic stores can be re-ordered and do not need to be
1269 // treated as read clobber.
1270 if (auto SI = dyn_cast<StoreInst>(UseInst))
1271 return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);
1273 if (!UseInst->mayReadFromMemory())
1274 return false;
1276 if (auto *CB = dyn_cast<CallBase>(UseInst))
1277 if (CB->onlyAccessesInaccessibleMemory())
1278 return false;
1280 return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
1283 /// Returns true if a dependency between \p Current and \p KillingDef is
1284 /// guaranteed to be loop invariant for the loops that they are in. Either
1285 /// because they are known to be in the same block, in the same loop level or
1286 /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
1287 /// during execution of the containing function.
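/// For example (hypothetical IR), a store to `getelementptr i32, ptr %a, i64 %i`
/// inside a loop writes a different address on every iteration, so an alias
/// result computed for a single iteration cannot be used to prove that a store
/// outside the loop overwrites it; such a dependency is not guaranteed to be
/// loop independent unless both accesses are in the same block or loop.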
1288 bool isGuaranteedLoopIndependent(const Instruction *Current,
1289 const Instruction *KillingDef,
1290 const MemoryLocation &CurrentLoc) {
1291 // If the dependency is within the same block or loop level (being careful
1292 // of irreducible loops), we know that AA will return a valid result for the
1293 // memory dependency. (Both at the function level, outside of any loop,
1294 // would also be valid but we currently disable that to limit compile time).
1295 if (Current->getParent() == KillingDef->getParent())
1296 return true;
1297 const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
1298 if (!ContainsIrreducibleLoops && CurrentLI &&
1299 CurrentLI == LI.getLoopFor(KillingDef->getParent()))
1300 return true;
1301 // Otherwise check the memory location is invariant to any loops.
1302 return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
1305 /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
1306 /// loop. In particular, this guarantees that it only references a single
1307 /// MemoryLocation during execution of the containing function.
1308 bool isGuaranteedLoopInvariant(const Value *Ptr) {
1309 Ptr = Ptr->stripPointerCasts();
1310 if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
1311 if (GEP->hasAllConstantIndices())
1312 Ptr = GEP->getPointerOperand()->stripPointerCasts();
1314 if (auto *I = dyn_cast<Instruction>(Ptr)) {
1315 return I->getParent()->isEntryBlock() ||
1316 (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
1318 return true;
1321 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
1322 // with no read access between them or on any other path to a function exit
1323 // block if \p KillingLoc is not accessible after the function returns. If
1324 // there is no such MemoryDef, return std::nullopt. The returned value may not
1325 // (completely) overwrite \p KillingLoc. Currently we bail out when we
1326 // encounter an aliasing MemoryUse (read).
1327 std::optional<MemoryAccess *>
1328 getDomMemoryDef(MemoryDef *KillingDef, MemoryAccess *StartAccess,
1329 const MemoryLocation &KillingLoc, const Value *KillingUndObj,
1330 unsigned &ScanLimit, unsigned &WalkerStepLimit,
1331 bool IsMemTerm, unsigned &PartialLimit) {
1332 if (ScanLimit == 0 || WalkerStepLimit == 0) {
1333 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1334 return std::nullopt;
1337 MemoryAccess *Current = StartAccess;
1338 Instruction *KillingI = KillingDef->getMemoryInst();
1339 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1341 // Only optimize defining access of KillingDef when directly starting at its
1342 // defining access. The defining access also must only access KillingLoc. At
1343 // the moment we only support instructions with a single write location, so
1344 // it should be sufficient to disable optimizations for instructions that
1345 // also read from memory.
1346 bool CanOptimize = OptimizeMemorySSA &&
1347 KillingDef->getDefiningAccess() == StartAccess &&
1348 !KillingI->mayReadFromMemory();
1350 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1351 std::optional<MemoryLocation> CurrentLoc;
1352 for (;; Current = cast<MemoryDef>(Current)->getDefiningAccess()) {
1353 LLVM_DEBUG({
1354 dbgs() << " visiting " << *Current;
1355 if (!MSSA.isLiveOnEntryDef(Current) && isa<MemoryUseOrDef>(Current))
1356 dbgs() << " (" << *cast<MemoryUseOrDef>(Current)->getMemoryInst()
1357 << ")";
1358 dbgs() << "\n";
1361 // Reached TOP.
1362 if (MSSA.isLiveOnEntryDef(Current)) {
1363 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1364 if (CanOptimize && Current != KillingDef->getDefiningAccess())
1365 // The first clobbering def is... none.
1366 KillingDef->setOptimized(Current);
1367 return std::nullopt;
1370 // Cost of a step. Accesses in the same block are more likely to be valid
1371 // candidates for elimination, hence consider them cheaper.
1372 unsigned StepCost = KillingDef->getBlock() == Current->getBlock()
1373 ? MemorySSASameBBStepCost
1374 : MemorySSAOtherBBStepCost;
1375 if (WalkerStepLimit <= StepCost) {
1376 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1377 return std::nullopt;
1379 WalkerStepLimit -= StepCost;
1381 // Return for MemoryPhis. They cannot be eliminated directly and the
1382 // caller is responsible for traversing them.
1383 if (isa<MemoryPhi>(Current)) {
1384 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1385 return Current;
1388 // Below, check if CurrentDef is a valid candidate to be eliminated by
1389 // KillingDef. If it is not, check the next candidate.
1390 MemoryDef *CurrentDef = cast<MemoryDef>(Current);
1391 Instruction *CurrentI = CurrentDef->getMemoryInst();
1393 if (canSkipDef(CurrentDef, !isInvisibleToCallerOnUnwind(KillingUndObj))) {
1394 CanOptimize = false;
1395 continue;
1398 // Before we try to remove anything, check for any extra throwing
1399 // instructions that block us from DSEing
1400 if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
1401 LLVM_DEBUG(dbgs() << " ... skip, may throw!\n");
1402 return std::nullopt;
1405 // Check for anything that looks like it will be a barrier to further
1406 // removal
1407 if (isDSEBarrier(KillingUndObj, CurrentI)) {
1408 LLVM_DEBUG(dbgs() << " ... skip, barrier\n");
1409 return std::nullopt;
1412 // If Current is known to be on a path that reads DefLoc or is a read
1413 // clobber, bail out, as the path is not profitable. We skip this check
1414 // for intrinsic calls, because the code knows how to handle memcpy
1415 // intrinsics.
1416 if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
1417 return std::nullopt;
1419 // Quick check if there are direct uses that are read-clobbers.
1420 if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
1421 if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
1422 return !MSSA.dominates(StartAccess, UseOrDef) &&
1423 isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
1424 return false;
1425 })) {
1426 LLVM_DEBUG(dbgs() << " ... found a read clobber\n");
1427 return std::nullopt;
1430 // If Current does not have an analyzable write location or is not
1431 // removable, skip it.
1432 CurrentLoc = getLocForWrite(CurrentI);
1433 if (!CurrentLoc || !isRemovable(CurrentI)) {
1434 CanOptimize = false;
1435 continue;
1438 // AliasAnalysis does not account for loops. Limit elimination to
1439 // candidates that are guaranteed to always store to the same memory
1440 // location and are not located in different loops.
1441 if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
1442 LLVM_DEBUG(dbgs() << " ... not guaranteed loop independent\n");
1443 CanOptimize = false;
1444 continue;
1447 if (IsMemTerm) {
1448 // If the killing def is a memory terminator (e.g. lifetime.end), check
1449 // the next candidate if Current does not write to the same underlying
1450 // object as the terminator.
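// An illustrative sketch (hypothetical IR) of a memory terminator killing a
// store to the same alloca:
//   store i32 1, ptr %a                              ; Current
//   call void @llvm.lifetime.end.p0(i64 4, ptr %a)   ; KillingI (terminator)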
1451 if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
1452 CanOptimize = false;
1453 continue;
1455 } else {
1456 int64_t KillingOffset = 0;
1457 int64_t DeadOffset = 0;
1458 auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
1459 KillingOffset, DeadOffset);
1460 if (CanOptimize) {
1461 // CurrentDef is the earliest write clobber of KillingDef. Use it as
1462 // optimized access. Do not optimize if CurrentDef is already the
1463 // defining access of KillingDef.
1464 if (CurrentDef != KillingDef->getDefiningAccess() &&
1465 (OR == OW_Complete || OR == OW_MaybePartial))
1466 KillingDef->setOptimized(CurrentDef);
1468 // Once a may-aliasing def is encountered, do not set an optimized
1469 // access.
1470 if (OR != OW_None)
1471 CanOptimize = false;
1474 // If Current does not write to the same object as KillingDef, check
1475 // the next candidate.
1476 if (OR == OW_Unknown || OR == OW_None)
1477 continue;
1478 else if (OR == OW_MaybePartial) {
1479 // If KillingDef only partially overwrites Current, check the next
1480 // candidate if the partial step limit is exceeded. This aggressively
1481 // limits the number of candidates for partial store elimination,
1482 // which are less likely to be removable in the end.
1483 if (PartialLimit <= 1) {
1484 WalkerStepLimit -= 1;
1485 LLVM_DEBUG(dbgs() << " ... reached partial limit ... continue with next access\n");
1486 continue;
1488 PartialLimit -= 1;
1491 break;
1494 // Accesses to objects accessible after the function returns can only be
1495 // eliminated if the access is dead along all paths to the exit. Collect
1496 // the blocks with killing (=completely overwriting) MemoryDefs and check if
1497 // they cover all paths from MaybeDeadAccess to any function exit.
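// Illustrative CFG sketch (hypothetical, not from a test case): the store in
// BB0 can only be removed if every path from BB0 to an exit passes through a
// killing block.
//   BB0: store i32 0, ptr @g      ; MaybeDeadAccess
//   BB0 -> BB1 -> exit            ; BB1 does not overwrite @g
//   BB0 -> BB2 -> exit            ; BB2: store i32 1, ptr @g (killing block)
// The path through BB1 leaves the store in BB0 visible, so it must be kept.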
1498 SmallPtrSet<Instruction *, 16> KillingDefs;
1499 KillingDefs.insert(KillingDef->getMemoryInst());
1500 MemoryAccess *MaybeDeadAccess = Current;
1501 MemoryLocation MaybeDeadLoc = *CurrentLoc;
1502 Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
1503 LLVM_DEBUG(dbgs() << " Checking for reads of " << *MaybeDeadAccess << " ("
1504 << *MaybeDeadI << ")\n");
1506 SmallSetVector<MemoryAccess *, 32> WorkList;
1507 auto PushMemUses = [&WorkList](MemoryAccess *Acc) {
1508 for (Use &U : Acc->uses())
1509 WorkList.insert(cast<MemoryAccess>(U.getUser()));
1511 PushMemUses(MaybeDeadAccess);
1513 // Check if DeadDef may be read.
1514 for (unsigned I = 0; I < WorkList.size(); I++) {
1515 MemoryAccess *UseAccess = WorkList[I];
1517 LLVM_DEBUG(dbgs() << " " << *UseAccess);
1518 // Bail out if the number of accesses to check exceeds the scan limit.
1519 if (ScanLimit < (WorkList.size() - I)) {
1520 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1521 return std::nullopt;
1523 --ScanLimit;
1524 NumDomMemDefChecks++;
1526 if (isa<MemoryPhi>(UseAccess)) {
1527 if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
1528 return DT.properlyDominates(KI->getParent(),
1529 UseAccess->getBlock());
1530 })) {
1531 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
1532 continue;
1534 LLVM_DEBUG(dbgs() << "\n ... adding PHI uses\n");
1535 PushMemUses(UseAccess);
1536 continue;
1539 Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
1540 LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
1542 if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
1543 return DT.dominates(KI, UseInst);
1544 })) {
1545 LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
1546 continue;
1549 // A memory terminator kills all preceding MemoryDefs and all succeeding
1550 // MemoryAccesses. We do not have to check its users.
1551 if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1552 LLVM_DEBUG(
1553 dbgs()
1554 << " ... skipping, memterminator invalidates following accesses\n");
1555 continue;
1558 if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
1559 LLVM_DEBUG(dbgs() << " ... adding uses of intrinsic\n");
1560 PushMemUses(UseAccess);
1561 continue;
1564 if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
1565 LLVM_DEBUG(dbgs() << " ... found throwing instruction\n");
1566 return std::nullopt;
1569 // Uses which may read the original MemoryDef mean we cannot eliminate the
1570 // original MD. Stop walk.
1571 if (isReadClobber(MaybeDeadLoc, UseInst)) {
1572 LLVM_DEBUG(dbgs() << " ... found read clobber\n");
1573 return std::nullopt;
1576 // If this worklist walks back to the original memory access (and the
1577 // pointer is not guaranteed loop invariant) then we cannot assume that a
1578 // store kills itself.
1579 if (MaybeDeadAccess == UseAccess &&
1580 !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
1581 LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
1582 return std::nullopt;
1584 // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
1585 // if it reads the memory location.
1586 // TODO: It would probably be better to check for self-reads before
1587 // calling the function.
1588 if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
1589 LLVM_DEBUG(dbgs() << " ... skipping killing def/dom access\n");
1590 continue;
1593 // Check the uses of all MemoryDefs we discover, including non-aliasing
1594 // ones, except for defs that completely overwrite the original location.
1595 // If we skipped the uses of non-aliasing defs, we might miss cases like
1596 // the following:
1597 // 1 = Def(LoE) ; <----- DeadDef stores [0,1]
1598 // 2 = Def(1) ; (2, 1) = NoAlias, stores [2,3]
1599 // Use(2) ; MayAlias 2 *and* 1, loads [0, 3].
1600 // (The Use points to the *first* Def it may alias)
1601 // 3 = Def(1) ; <---- Current (3, 2) = NoAlias, (3,1) = MayAlias,
1602 // stores [0,1]
1603 if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
1604 if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
1605 BasicBlock *MaybeKillingBlock = UseInst->getParent();
1606 if (PostOrderNumbers.find(MaybeKillingBlock)->second <
1607 PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
1608 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1609 LLVM_DEBUG(dbgs()
1610 << " ... found killing def " << *UseInst << "\n");
1611 KillingDefs.insert(UseInst);
1613 } else {
1614 LLVM_DEBUG(dbgs()
1615 << " ... found preceding def " << *UseInst << "\n");
1616 return std::nullopt;
1618 } else
1619 PushMemUses(UseDef);
1623 // For accesses to locations visible after the function returns, make sure
1624 // that the location is dead (=overwritten) along all paths from
1625 // MaybeDeadAccess to the exit.
1626 if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
1627 SmallPtrSet<BasicBlock *, 16> KillingBlocks;
1628 for (Instruction *KD : KillingDefs)
1629 KillingBlocks.insert(KD->getParent());
1630 assert(!KillingBlocks.empty() &&
1631 "Expected at least a single killing block");
1633 // Find the common post-dominator of all killing blocks.
1634 BasicBlock *CommonPred = *KillingBlocks.begin();
1635 for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
1636 if (!CommonPred)
1637 break;
1638 CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
1641 // If the common post-dominator does not post-dominate MaybeDeadAccess,
1642 // there is a path from MaybeDeadAccess to an exit not going through a
1643 // killing block.
1644 if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
1645 if (!AnyUnreachableExit)
1646 return std::nullopt;
1648 // Fall back to CFG scan starting at all non-unreachable roots if not
1649 // all paths to the exit go through CommonPred.
1650 CommonPred = nullptr;
1653 // If CommonPred itself is in the set of killing blocks, we're done.
1654 if (KillingBlocks.count(CommonPred))
1655 return {MaybeDeadAccess};
1657 SetVector<BasicBlock *> WorkList;
1658 // If CommonPred is null, there are multiple exits from the function.
1659 // They all have to be added to the worklist.
1660 if (CommonPred)
1661 WorkList.insert(CommonPred);
1662 else
1663 for (BasicBlock *R : PDT.roots()) {
1664 if (!isa<UnreachableInst>(R->getTerminator()))
1665 WorkList.insert(R);
1668 NumCFGTries++;
1669 // Check if all paths starting from an exit node go through one of the
1670 // killing blocks before reaching MaybeDeadAccess.
1671 for (unsigned I = 0; I < WorkList.size(); I++) {
1672 NumCFGChecks++;
1673 BasicBlock *Current = WorkList[I];
1674 if (KillingBlocks.count(Current))
1675 continue;
1676 if (Current == MaybeDeadAccess->getBlock())
1677 return std::nullopt;
1679 // MaybeDeadAccess is reachable from the entry, so we don't have to
1680 // explore unreachable blocks further.
1681 if (!DT.isReachableFromEntry(Current))
1682 continue;
1684 for (BasicBlock *Pred : predecessors(Current))
1685 WorkList.insert(Pred);
1687 if (WorkList.size() >= MemorySSAPathCheckLimit)
1688 return std::nullopt;
1690 NumCFGSuccess++;
1693 // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
1694 // potentially dead.
1695 return {MaybeDeadAccess};
1698 /// Delete dead memory defs and recursively add their operands to ToRemove if
1699 /// they become dead.
1700 void deleteDeadInstruction(Instruction *SI) {
1701 MemorySSAUpdater Updater(&MSSA);
1702 SmallVector<Instruction *, 32> NowDeadInsts;
1703 NowDeadInsts.push_back(SI);
1704 --NumFastOther;
1706 while (!NowDeadInsts.empty()) {
1707 Instruction *DeadInst = NowDeadInsts.pop_back_val();
1708 ++NumFastOther;
1710 // Try to preserve debug information attached to the dead instruction.
1711 salvageDebugInfo(*DeadInst);
1712 salvageKnowledge(DeadInst);
1714 // Remove the Instruction from MSSA.
1715 MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst);
1716 bool IsMemDef = MA && isa<MemoryDef>(MA);
1717 if (MA) {
1718 if (IsMemDef) {
1719 auto *MD = cast<MemoryDef>(MA);
1720 SkipStores.insert(MD);
1721 if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
1722 if (SI->getValueOperand()->getType()->isPointerTy()) {
1723 const Value *UO = getUnderlyingObject(SI->getValueOperand());
1724 if (CapturedBeforeReturn.erase(UO))
1725 ShouldIterateEndOfFunctionDSE = true;
1726 InvisibleToCallerAfterRet.erase(UO);
1731 Updater.removeMemoryAccess(MA);
1734 auto I = IOLs.find(DeadInst->getParent());
1735 if (I != IOLs.end())
1736 I->second.erase(DeadInst);
1737 // Remove its operands
1738 for (Use &O : DeadInst->operands())
1739 if (Instruction *OpI = dyn_cast<Instruction>(O)) {
1740 O.set(PoisonValue::get(O->getType()));
1741 if (isInstructionTriviallyDead(OpI, &TLI))
1742 NowDeadInsts.push_back(OpI);
1745 EI.removeInstruction(DeadInst);
1746 // Remove memory defs directly if they don't produce results, but only
1747 // queue other dead instructions for later removal. They may have been
1748 // used as memory locations that have been cached by BatchAA. Removing
1749 // them here may lead to newly created instructions being allocated at the
1750 // same address, yielding stale cache entries.
1751 if (IsMemDef && DeadInst->getType()->isVoidTy())
1752 DeadInst->eraseFromParent();
1753 else
1754 ToRemove.push_back(DeadInst);
1758 // Check for any extra throws between \p KillingI and \p DeadI that block
1759 // DSE. This only checks extra may-throws (those that aren't MemoryDefs).
1760 // MemoryDefs that may throw are handled during the walk from one def to
1761 // the next.
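// A minimal sketch (hypothetical IR), assuming @may_throw reads no memory but
// may unwind; if @g is visible to the caller on unwind, the first store
// cannot be eliminated:
//   store i32 0, ptr @g
//   call void @may_throw()        ; extra may-throw, not a MemoryDef
//   store i32 1, ptr @g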
1762 bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
1763 const Value *KillingUndObj) {
1764 // First see if we can ignore it by using the fact that the underlying
1765 // object of KillingI is an alloca/alloca-like object that is not visible
1766 // to the caller during execution of the function.
1767 if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
1768 return false;
1770 if (KillingI->getParent() == DeadI->getParent())
1771 return ThrowingBlocks.count(KillingI->getParent());
1772 return !ThrowingBlocks.empty();
1775 // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
1776 // instructions act as barriers:
1777 // * A memory instruction that may throw, when \p KillingI accesses a
1778 // non-stack object.
1779 // * Atomic memory accesses stronger than monotonic.
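// For example (hypothetical IR), a release store between two plain stores is
// such a barrier and stops the upwards walk:
//   store i32 0, ptr %p
//   store atomic i32 1, ptr %q release, align 4   ; stronger than monotonic
//   store i32 1, ptr %p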
1780 bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
1781 // If DeadI may throw it acts as a barrier, unless we are accessing an
1782 // alloca/alloca-like object that does not escape.
1783 if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
1784 return true;
1786 // If DeadI is an atomic load/store stronger than monotonic, do not try to
1787 // eliminate/reorder it.
1788 if (DeadI->isAtomic()) {
1789 if (auto *LI = dyn_cast<LoadInst>(DeadI))
1790 return isStrongerThanMonotonic(LI->getOrdering());
1791 if (auto *SI = dyn_cast<StoreInst>(DeadI))
1792 return isStrongerThanMonotonic(SI->getOrdering());
1793 if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
1794 return isStrongerThanMonotonic(ARMW->getOrdering());
1795 if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
1796 return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
1797 isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
1798 llvm_unreachable("other instructions should be skipped in MemorySSA");
1800 return false;
1803 /// Eliminate writes to objects that are not visible in the caller and are not
1804 /// accessed before returning from the function.
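/// A minimal sketch (hypothetical IR): the store below is dead because %a
/// does not escape and is never read before the function returns:
///   %a = alloca i32
///   store i32 1, ptr %a
///   ret void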
1805 bool eliminateDeadWritesAtEndOfFunction() {
1806 bool MadeChange = false;
1807 LLVM_DEBUG(
1808 dbgs()
1809 << "Trying to eliminate MemoryDefs at the end of the function\n");
1810 do {
1811 ShouldIterateEndOfFunctionDSE = false;
1812 for (MemoryDef *Def : llvm::reverse(MemDefs)) {
1813 if (SkipStores.contains(Def))
1814 continue;
1816 Instruction *DefI = Def->getMemoryInst();
1817 auto DefLoc = getLocForWrite(DefI);
1818 if (!DefLoc || !isRemovable(DefI))
1819 continue;
1821 // NOTE: Currently eliminating writes at the end of a function is
1822 // limited to MemoryDefs with a single underlying object, to save
1823 // compile-time. In practice it appears the case with multiple
1824 // underlying objects is very uncommon. If it turns out to be important,
1825 // we can use getUnderlyingObjects here instead.
1826 const Value *UO = getUnderlyingObject(DefLoc->Ptr);
1827 if (!isInvisibleToCallerAfterRet(UO))
1828 continue;
1830 if (isWriteAtEndOfFunction(Def)) {
1831 // See through pointer-to-pointer bitcasts
1832 LLVM_DEBUG(dbgs() << " ... MemoryDef is not accessed until the end "
1833 "of the function\n");
1834 deleteDeadInstruction(DefI);
1835 ++NumFastStores;
1836 MadeChange = true;
1839 } while (ShouldIterateEndOfFunctionDSE);
1840 return MadeChange;
1843 /// If we have a zero initializing memset following a call to malloc,
1844 /// try folding it into a call to calloc.
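/// A minimal sketch of the transformation (hypothetical IR), assuming the
/// memset covers the whole allocation and nothing writes to the buffer in
/// between:
///   %p = call ptr @malloc(i64 %n)
///   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 %n, i1 false)
/// becomes
///   %p = call ptr @calloc(i64 1, i64 %n)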
1845 bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
1846 Instruction *DefI = Def->getMemoryInst();
1847 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
1848 if (!MemSet)
1849 // TODO: Could handle zero store to small allocation as well.
1850 return false;
1851 Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1852 if (!StoredConstant || !StoredConstant->isNullValue())
1853 return false;
1855 if (!isRemovable(DefI))
1856 // The memset might be volatile.
1857 return false;
1859 if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
1860 F.hasFnAttribute(Attribute::SanitizeAddress) ||
1861 F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
1862 F.getName() == "calloc")
1863 return false;
1864 auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
1865 if (!Malloc)
1866 return false;
1867 auto *InnerCallee = Malloc->getCalledFunction();
1868 if (!InnerCallee)
1869 return false;
1870 LibFunc Func;
1871 if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
1872 Func != LibFunc_malloc)
1873 return false;
1874 // Gracefully handle malloc with unexpected memory attributes.
1875 auto *MallocDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(Malloc));
1876 if (!MallocDef)
1877 return false;
1879 auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
1880 // Check for a br(icmp(ptr, null), truebb, falsebb) pattern at the end
1881 // of the malloc block.
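// A sketch of the matched pattern (hypothetical IR):
//   %p = call ptr @malloc(i64 %n)
//   %cmp = icmp eq ptr %p, null
//   br i1 %cmp, label %fail, label %memset.bb   ; MemsetBB == FalseBB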
1882 auto *MallocBB = Malloc->getParent(),
1883 *MemsetBB = Memset->getParent();
1884 if (MallocBB == MemsetBB)
1885 return true;
1886 auto *Ptr = Memset->getArgOperand(0);
1887 auto *TI = MallocBB->getTerminator();
1888 ICmpInst::Predicate Pred;
1889 BasicBlock *TrueBB, *FalseBB;
1890 if (!match(TI, m_Br(m_ICmp(Pred, m_Specific(Ptr), m_Zero()), TrueBB,
1891 FalseBB)))
1892 return false;
1893 if (Pred != ICmpInst::ICMP_EQ || MemsetBB != FalseBB)
1894 return false;
1895 return true;
1898 if (Malloc->getOperand(0) != MemSet->getLength())
1899 return false;
1900 if (!shouldCreateCalloc(Malloc, MemSet) ||
1901 !DT.dominates(Malloc, MemSet) ||
1902 !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
1903 return false;
1904 IRBuilder<> IRB(Malloc);
1905 Type *SizeTTy = Malloc->getArgOperand(0)->getType();
1906 auto *Calloc = emitCalloc(ConstantInt::get(SizeTTy, 1),
1907 Malloc->getArgOperand(0), IRB, TLI);
1908 if (!Calloc)
1909 return false;
1911 MemorySSAUpdater Updater(&MSSA);
1912 auto *NewAccess =
1913 Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), nullptr,
1914 MallocDef);
1915 auto *NewAccessMD = cast<MemoryDef>(NewAccess);
1916 Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
1917 Malloc->replaceAllUsesWith(Calloc);
1918 deleteDeadInstruction(Malloc);
1919 return true;
1922 /// \returns true if \p Def is a no-op store, either because it
1923 /// directly stores back a loaded value or stores zero to a calloced object.
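/// Two minimal examples (hypothetical IR) of no-op stores:
///   %v = load i32, ptr %p
///   store i32 %v, ptr %p      ; stores back the value just loaded
/// and
///   %p = call ptr @calloc(i64 1, i64 4)
///   store i32 0, ptr %p       ; stores the allocation's initial value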
1924 bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
1925 Instruction *DefI = Def->getMemoryInst();
1926 StoreInst *Store = dyn_cast<StoreInst>(DefI);
1927 MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
1928 Constant *StoredConstant = nullptr;
1929 if (Store)
1930 StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
1931 else if (MemSet)
1932 StoredConstant = dyn_cast<Constant>(MemSet->getValue());
1933 else
1934 return false;
1936 if (!isRemovable(DefI))
1937 return false;
1939 if (StoredConstant) {
1940 Constant *InitC =
1941 getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
1942 // If the clobbering access of Def is LiveOnEntry, no instruction between
1943 // function entry and Def can modify the memory location.
1944 if (InitC && InitC == StoredConstant)
1945 return MSSA.isLiveOnEntryDef(
1946 MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
1949 if (!Store)
1950 return false;
1952 if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
1953 if (LoadI->getPointerOperand() == Store->getOperand(1)) {
1954 // Get the defining access for the load.
1955 auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
1956 // Fast path: the defining accesses are the same.
1957 if (LoadAccess == Def->getDefiningAccess())
1958 return true;
1960 // Look through phi accesses. Recursively scan all phi accesses by
1961 // adding them to a worklist. Bail when we run into a memory def that
1962 // does not match LoadAccess.
1963 SetVector<MemoryAccess *> ToCheck;
1964 MemoryAccess *Current =
1965 MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
1966 // We don't want to bail when we run into the store memory def. But,
1967 // the phi access may point to it. So, pretend like we've already
1968 // checked it.
1969 ToCheck.insert(Def);
1970 ToCheck.insert(Current);
1971 // Start at current (1) to simulate already having checked Def.
1972 for (unsigned I = 1; I < ToCheck.size(); ++I) {
1973 Current = ToCheck[I];
1974 if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
1975 // Check all the operands.
1976 for (auto &Use : PhiAccess->incoming_values())
1977 ToCheck.insert(cast<MemoryAccess>(&Use));
1978 continue;
1981 // If we found a memory def, bail. This happens when there is an
1982 // unrelated write in between the load and the otherwise no-op store.
1983 assert(isa<MemoryDef>(Current) &&
1984 "Only MemoryDefs should reach here.");
1985 // TODO: Skip no alias MemoryDefs that have no aliasing reads.
1986 // We are searching for the definition of the store's destination.
1987 // So, if that is the same definition as the load, then this is a
1988 // noop. Otherwise, fail.
1989 if (LoadAccess != Current)
1990 return false;
1992 return true;
1996 return false;
1999 bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
2000 bool Changed = false;
2001 for (auto OI : IOL) {
2002 Instruction *DeadI = OI.first;
2003 MemoryLocation Loc = *getLocForWrite(DeadI);
2004 assert(isRemovable(DeadI) && "Expect only removable instruction");
2006 const Value *Ptr = Loc.Ptr->stripPointerCasts();
2007 int64_t DeadStart = 0;
2008 uint64_t DeadSize = Loc.Size.getValue();
2009 GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
2010 OverlapIntervalsTy &IntervalMap = OI.second;
2011 Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
2012 if (IntervalMap.empty())
2013 continue;
2014 Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
2016 return Changed;
2019 /// Eliminates writes to locations where the value that is being written
2020 /// is already stored at the same location.
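/// For example (hypothetical IR), the store below is redundant because the
/// memset already wrote the same bytes to the same location:
///   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 4, i1 false)
///   store i32 0, ptr %p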
2021 bool eliminateRedundantStoresOfExistingValues() {
2022 bool MadeChange = false;
2023 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
2024 "already existing value\n");
2025 for (auto *Def : MemDefs) {
2026 if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
2027 continue;
2029 Instruction *DefInst = Def->getMemoryInst();
2030 auto MaybeDefLoc = getLocForWrite(DefInst);
2031 if (!MaybeDefLoc || !isRemovable(DefInst))
2032 continue;
2034 MemoryDef *UpperDef;
2035 // To conserve compile-time, we avoid walking to the next clobbering def.
2036 // Instead, we just try to get the optimized access, if it exists. DSE
2037 // will try to optimize defs during the earlier traversal.
2038 if (Def->isOptimized())
2039 UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
2040 else
2041 UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
2042 if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
2043 continue;
2045 Instruction *UpperInst = UpperDef->getMemoryInst();
2046 auto IsRedundantStore = [&]() {
2047 if (DefInst->isIdenticalTo(UpperInst))
2048 return true;
2049 if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
2050 if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
2051 // MemSetInst must have a write location.
2052 MemoryLocation UpperLoc = *getLocForWrite(UpperInst);
2053 int64_t InstWriteOffset = 0;
2054 int64_t DepWriteOffset = 0;
2055 auto OR = isOverwrite(UpperInst, DefInst, UpperLoc, *MaybeDefLoc,
2056 InstWriteOffset, DepWriteOffset);
2057 Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
2058 return StoredByte && StoredByte == MemSetI->getOperand(1) &&
2059 OR == OW_Complete;
2062 return false;
2065 if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
2066 continue;
2067 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *DefInst
2068 << '\n');
2069 deleteDeadInstruction(DefInst);
2070 NumRedundantStores++;
2071 MadeChange = true;
2073 return MadeChange;
2077 static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
2078 DominatorTree &DT, PostDominatorTree &PDT,
2079 const TargetLibraryInfo &TLI,
2080 const LoopInfo &LI) {
2081 bool MadeChange = false;
2083 DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);
2084 // For each store:
2085 for (unsigned I = 0; I < State.MemDefs.size(); I++) {
2086 MemoryDef *KillingDef = State.MemDefs[I];
2087 if (State.SkipStores.count(KillingDef))
2088 continue;
2089 Instruction *KillingI = KillingDef->getMemoryInst();
2091 std::optional<MemoryLocation> MaybeKillingLoc;
2092 if (State.isMemTerminatorInst(KillingI)) {
2093 if (auto KillingLoc = State.getLocForTerminator(KillingI))
2094 MaybeKillingLoc = KillingLoc->first;
2095 } else {
2096 MaybeKillingLoc = State.getLocForWrite(KillingI);
2099 if (!MaybeKillingLoc) {
2100 LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
2101 << *KillingI << "\n");
2102 continue;
2104 MemoryLocation KillingLoc = *MaybeKillingLoc;
2105 assert(KillingLoc.Ptr && "KillingLoc should not be null");
2106 const Value *KillingUndObj = getUnderlyingObject(KillingLoc.Ptr);
2107 LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
2108 << *KillingDef << " (" << *KillingI << ")\n");
2110 unsigned ScanLimit = MemorySSAScanLimit;
2111 unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
2112 unsigned PartialLimit = MemorySSAPartialStoreLimit;
2113 // Worklist of MemoryAccesses that may be killed by KillingDef.
2114 SetVector<MemoryAccess *> ToCheck;
2115 ToCheck.insert(KillingDef->getDefiningAccess());
2117 bool Shortend = false;
2118 bool IsMemTerm = State.isMemTerminatorInst(KillingI);
2119 // Check if MemoryAccesses in the worklist are killed by KillingDef.
2120 for (unsigned I = 0; I < ToCheck.size(); I++) {
2121 MemoryAccess *Current = ToCheck[I];
2122 if (State.SkipStores.count(Current))
2123 continue;
2125 std::optional<MemoryAccess *> MaybeDeadAccess = State.getDomMemoryDef(
2126 KillingDef, Current, KillingLoc, KillingUndObj, ScanLimit,
2127 WalkerStepLimit, IsMemTerm, PartialLimit);
2129 if (!MaybeDeadAccess) {
2130 LLVM_DEBUG(dbgs() << " finished walk\n");
2131 continue;
2134 MemoryAccess *DeadAccess = *MaybeDeadAccess;
2135 LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
2136 if (isa<MemoryPhi>(DeadAccess)) {
2137 LLVM_DEBUG(dbgs() << "\n ... adding incoming values to worklist\n");
2138 for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
2139 MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
2140 BasicBlock *IncomingBlock = IncomingAccess->getBlock();
2141 BasicBlock *PhiBlock = DeadAccess->getBlock();
2143 // We only consider incoming MemoryAccesses that come before the
2144 // MemoryPhi. Otherwise we could discover candidates that do not
2145 // strictly dominate our starting def.
2146 if (State.PostOrderNumbers[IncomingBlock] >
2147 State.PostOrderNumbers[PhiBlock])
2148 ToCheck.insert(IncomingAccess);
2150 continue;
2152 auto *DeadDefAccess = cast<MemoryDef>(DeadAccess);
2153 Instruction *DeadI = DeadDefAccess->getMemoryInst();
2154 LLVM_DEBUG(dbgs() << " (" << *DeadI << ")\n");
2155 ToCheck.insert(DeadDefAccess->getDefiningAccess());
2156 NumGetDomMemoryDefPassed++;
2158 if (!DebugCounter::shouldExecute(MemorySSACounter))
2159 continue;
2161 MemoryLocation DeadLoc = *State.getLocForWrite(DeadI);
2163 if (IsMemTerm) {
2164 const Value *DeadUndObj = getUnderlyingObject(DeadLoc.Ptr);
2165 if (KillingUndObj != DeadUndObj)
2166 continue;
2167 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2168 << "\n KILLER: " << *KillingI << '\n');
2169 State.deleteDeadInstruction(DeadI);
2170 ++NumFastStores;
2171 MadeChange = true;
2172 } else {
2173 // Check if DeadI overwrites KillingI.
2174 int64_t KillingOffset = 0;
2175 int64_t DeadOffset = 0;
2176 OverwriteResult OR = State.isOverwrite(
2177 KillingI, DeadI, KillingLoc, DeadLoc, KillingOffset, DeadOffset);
2178 if (OR == OW_MaybePartial) {
2179 auto Iter = State.IOLs.insert(
2180 std::make_pair<BasicBlock *, InstOverlapIntervalsTy>(
2181 DeadI->getParent(), InstOverlapIntervalsTy()));
2182 auto &IOL = Iter.first->second;
2183 OR = isPartialOverwrite(KillingLoc, DeadLoc, KillingOffset,
2184 DeadOffset, DeadI, IOL);
2187 if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
2188 auto *DeadSI = dyn_cast<StoreInst>(DeadI);
2189 auto *KillingSI = dyn_cast<StoreInst>(KillingI);
2190 // We are re-using tryToMergePartialOverlappingStores, which requires
2191 // DeadSI to dominate KillingSI.
2192 // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
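// A minimal sketch (hypothetical IR) of such a merge: the later store's bytes
// lie entirely within the earlier store, so the constants can be combined
// into DeadSI and KillingSI removed:
//   store i32 0, ptr %p                    ; DeadSI, bytes [0,4)
//   %q = getelementptr i8, ptr %p, i64 2
//   store i16 -1, ptr %q                   ; KillingSI, bytes [2,4)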
2193 if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
2194 if (Constant *Merged = tryToMergePartialOverlappingStores(
2195 KillingSI, DeadSI, KillingOffset, DeadOffset, State.DL,
2196 State.BatchAA, &DT)) {
2198 // Update stored value of earlier store to merged constant.
2199 DeadSI->setOperand(0, Merged);
2200 ++NumModifiedStores;
2201 MadeChange = true;
2203 Shortend = true;
2204 // Remove killing store and remove any outstanding overlap
2205 // intervals for the updated store.
2206 State.deleteDeadInstruction(KillingSI);
2207 auto I = State.IOLs.find(DeadSI->getParent());
2208 if (I != State.IOLs.end())
2209 I->second.erase(DeadSI);
2210 break;
2215 if (OR == OW_Complete) {
2216 LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DeadI
2217 << "\n KILLER: " << *KillingI << '\n');
2218 State.deleteDeadInstruction(DeadI);
2219 ++NumFastStores;
2220 MadeChange = true;
2225 // Check if the store is a no-op.
2226 if (!Shortend && State.storeIsNoop(KillingDef, KillingUndObj)) {
2227 LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n DEAD: " << *KillingI
2228 << '\n');
2229 State.deleteDeadInstruction(KillingI);
2230 NumRedundantStores++;
2231 MadeChange = true;
2232 continue;
2235 // Can we form a calloc from a memset/malloc pair?
2236 if (!Shortend && State.tryFoldIntoCalloc(KillingDef, KillingUndObj)) {
2237 LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
2238 << " DEAD: " << *KillingI << '\n');
2239 State.deleteDeadInstruction(KillingI);
2240 MadeChange = true;
2241 continue;
2245 if (EnablePartialOverwriteTracking)
2246 for (auto &KV : State.IOLs)
2247 MadeChange |= State.removePartiallyOverlappedStores(KV.second);
2249 MadeChange |= State.eliminateRedundantStoresOfExistingValues();
2250 MadeChange |= State.eliminateDeadWritesAtEndOfFunction();
2252 while (!State.ToRemove.empty()) {
2253 Instruction *DeadInst = State.ToRemove.pop_back_val();
2254 DeadInst->eraseFromParent();
2257 return MadeChange;
2259 } // end anonymous namespace
2261 //===----------------------------------------------------------------------===//
2262 // DSE Pass
2263 //===----------------------------------------------------------------------===//
2264 PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
2265 AliasAnalysis &AA = AM.getResult<AAManager>(F);
2266 const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
2267 DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
2268 MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
2269 PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
2270 LoopInfo &LI = AM.getResult<LoopAnalysis>(F);
2272 bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);
2274 #ifdef LLVM_ENABLE_STATS
2275 if (AreStatisticsEnabled())
2276 for (auto &I : instructions(F))
2277 NumRemainingStores += isa<StoreInst>(&I);
2278 #endif
2280 if (!Changed)
2281 return PreservedAnalyses::all();
2283 PreservedAnalyses PA;
2284 PA.preserveSet<CFGAnalyses>();
2285 PA.preserve<MemorySSAAnalysis>();
2286 PA.preserve<LoopAnalysis>();
2287 return PA;