//===- DeadStoreElimination.cpp - Fast Dead Store Elimination -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal. Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <map>
#include <utility>
using namespace llvm;

#define DEBUG_TYPE "dse"

STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");

static cl::opt<bool>
EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
EnablePartialStoreMerging("enable-dse-partial-store-merging",
  cl::init(true), cl::Hidden,
  cl::desc("Enable partial store merging in DSE"));

//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
/// Delete this instruction. Before we do, go through and zero out all the
/// operands of this instruction. If any of them become dead, delete them and
/// the computation tree that feeds them.
/// If ValueSet is non-null, remove any deleted instructions from it as well.
static void
deleteDeadInstruction(Instruction *I, BasicBlock::iterator *BBI,
                      MemoryDependenceResults &MD, const TargetLibraryInfo &TLI,
                      InstOverlapIntervalsTy &IOL,
                      DenseMap<Instruction*, size_t> *InstrOrdering,
                      SmallSetVector<Value *, 16> *ValueSet = nullptr) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Keeping the iterator straight is a pain, so we let this routine tell the
  // caller what the next instruction is after we're done mucking about.
  BasicBlock::iterator NewIter = *BBI;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // Try to preserve debug information attached to the dead instruction.
    salvageDebugInfo(*DeadInst);

    // This instruction is dead, zap it, in stages. Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, nullptr);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI, &TLI))
          NowDeadInsts.push_back(OpI);
    }

    if (ValueSet) ValueSet->remove(DeadInst);
    InstrOrdering->erase(DeadInst);
    IOL.erase(DeadInst);

    if (NewIter == DeadInst->getIterator())
      NewIter = DeadInst->eraseFromParent();
    else
      DeadInst->eraseFromParent();
  } while (!NowDeadInsts.empty());
  *BBI = NewIter;
}
/// Does this instruction write some memory? This only returns true for things
/// that we can analyze with other helpers below.
static bool hasAnalyzableMemoryWrite(Instruction *I,
                                     const TargetLibraryInfo &TLI) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  if (auto CS = CallSite(I)) {
    if (Function *F = CS.getCalledFunction()) {
      StringRef FnName = F->getName();
      if (TLI.has(LibFunc_strcpy) && FnName == TLI.getName(LibFunc_strcpy))
        return true;
      if (TLI.has(LibFunc_strncpy) && FnName == TLI.getName(LibFunc_strncpy))
        return true;
      if (TLI.has(LibFunc_strcat) && FnName == TLI.getName(LibFunc_strcat))
        return true;
      if (TLI.has(LibFunc_strncat) && FnName == TLI.getName(LibFunc_strncat))
        return true;
    }
  }
  return false;
}
/// Return a Location stored to by the specified instruction. If isRemovable
/// returns true, this function and getLocForRead completely describe the memory
/// operations for this instruction.
static MemoryLocation getLocForWrite(Instruction *Inst) {

  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return MemoryLocation::get(SI);

  if (auto *MI = dyn_cast<AnyMemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    MemoryLocation Loc = MemoryLocation::getForDest(MI);
    return Loc;
  }

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst)) {
    switch (II->getIntrinsicID()) {
    default:
      return MemoryLocation(); // Unhandled intrinsic.
    case Intrinsic::init_trampoline:
      return MemoryLocation(II->getArgOperand(0));
    case Intrinsic::lifetime_end: {
      uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
      return MemoryLocation(II->getArgOperand(1), Len);
    }
    }
  }
  if (auto CS = CallSite(Inst))
    // All the supported TLI functions so far happen to have dest as their
    // first argument.
    return MemoryLocation(CS.getArgument(0));
  return MemoryLocation();
}
/// Return the location read by the specified "hasAnalyzableMemoryWrite"
/// instruction if any.
static MemoryLocation getLocForRead(Instruction *Inst,
                                    const TargetLibraryInfo &TLI) {
  assert(hasAnalyzableMemoryWrite(Inst, TLI) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (auto *MTI = dyn_cast<AnyMemTransferInst>(Inst))
    return MemoryLocation::getForSource(MTI);
  return MemoryLocation();
}
/// If the value of this instruction and the memory it writes to is unused, may
/// we delete this instruction?
static bool isRemovable(Instruction *I) {
  // Don't remove volatile/atomic stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->isUnordered();

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: llvm_unreachable("doesn't pass 'hasAnalyzableMemoryWrite' predicate");
    case Intrinsic::lifetime_end:
      // Never remove dead lifetime_end's, e.g. because it is followed by a
      // free.
      return false;
    case Intrinsic::init_trampoline:
      // Always safe to remove init_trampoline.
      return true;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
      // Don't remove volatile memory intrinsics.
      return !cast<MemIntrinsic>(II)->isVolatile();
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memmove_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      return true;
    }
  }

  // note: only get here for calls with analyzable writes - i.e. libcalls
  if (auto CS = CallSite(I))
    return CS.getInstruction()->use_empty();

  return false;
}
/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten libcalls for now.

  return false;
}
/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}
/// Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  // TODO: factor this to reuse getLocForWrite
  MemoryLocation Loc = getLocForWrite(I);
  assert(Loc.Ptr &&
         "unable to find pointer written for analyzable instruction?");
  // TODO: most APIs don't expect const Value *
  return const_cast<Value*>(Loc.Ptr);
}
static uint64_t getPointerSize(const Value *V, const DataLayout &DL,
                               const TargetLibraryInfo &TLI,
                               const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}
namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_Unknown
};

} // end anonymous namespace
/// Return 'OW_Complete' if a store to the 'Later' location completely
/// overwrites a store to the 'Earlier' location, 'OW_End' if the end of the
/// 'Earlier' location is completely overwritten by 'Later', 'OW_Begin' if the
/// beginning of the 'Earlier' location is overwritten by 'Later'.
/// 'OW_PartialEarlierWithFullLater' means that an earlier (big) store was
/// overwritten by a later (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
static OverwriteResult isOverwrite(const MemoryLocation &Later,
                                   const MemoryLocation &Earlier,
                                   const DataLayout &DL,
                                   const TargetLibraryInfo &TLI,
                                   int64_t &EarlierOff, int64_t &LaterOff,
                                   Instruction *DepWrite,
                                   InstOverlapIntervalsTy &IOL,
                                   AliasAnalysis &AA,
                                   const Function *F) {
  // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
  // get imprecise values here, though (except for unknown sizes).
  if (!Later.Size.isPrecise() || !Earlier.Size.isPrecise())
    return OW_Unknown;

  const uint64_t LaterSize = Later.Size.getValue();
  const uint64_t EarlierSize = Earlier.Size.getValue();

  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2 || AA.isMustAlias(P1, P2)) {
    // Make sure that the Later size is >= the Earlier size.
    if (LaterSize >= EarlierSize)
      return OW_Complete;
  }

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval/inalloca argument). If so, then it clearly
  // overwrites any other store to the same object.
  const Value *UO1 = GetUnderlyingObject(P1, DL),
              *UO2 = GetUnderlyingObject(P2, DL);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return OW_Unknown;

  // If the "Later" store is to a recognizable object, get its size.
  uint64_t ObjectSize = getPointerSize(UO2, DL, TLI, F);
  if (ObjectSize != MemoryLocation::UnknownSize)
    if (ObjectSize == LaterSize && ObjectSize >= EarlierSize)
      return OW_Complete;

  // Okay, we have stores to two completely different pointers. Try to
  // decompose the pointer into a "base + constant_offset" form. If the base
  // pointers are equal, then we can reason about the two stores.
  EarlierOff = 0;
  LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, DL);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, DL);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return OW_Unknown;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
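  // For example, Earlier = [4, 8) and Later = [0, 12) satisfy all three checks
  // below (4 >= 0, 12 >= 4, and (4 - 0) + 4 <= 12), so the earlier store is
  // completely overwritten.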
  if (EarlierOff >= LaterOff &&
      LaterSize >= EarlierSize &&
      uint64_t(EarlierOff - LaterOff) + EarlierSize <= LaterSize)
    return OW_Complete;

  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // earlier write.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called with DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      LaterOff < int64_t(EarlierOff + EarlierSize) &&
      int64_t(LaterOff + LaterSize) >= EarlierOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DepWrite];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: Earlier [" << EarlierOff
                      << ", " << int64_t(EarlierOff + EarlierSize)
                      << ") Later [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
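    // For example, a later write covering the byte range [4, 12) is recorded
    // as IM[12] = 4.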
    int64_t LaterIntStart = LaterOff, LaterIntEnd = LaterOff + LaterSize;

    // Find any intervals ending at, or after, LaterIntStart which start
    // before LaterIntEnd.
    auto ILI = IM.lower_bound(LaterIntStart);
    if (ILI != IM.end() && ILI->second <= LaterIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [LaterIntStart, LaterIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      LaterIntStart = std::min(LaterIntStart, ILI->second);
      LaterIntEnd = std::max(LaterIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- earlier 1 ---|  |--- earlier 2 ---|
      //     |------- later---------|
      //
      while (ILI != IM.end() && ILI->second <= LaterIntEnd) {
        assert(ILI->second > LaterIntStart && "Unexpected interval");
        LaterIntEnd = std::max(LaterIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[LaterIntEnd] = LaterIntStart;

    ILI = IM.begin();
    if (ILI->second <= EarlierOff &&
        ILI->first >= int64_t(EarlierOff + EarlierSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: Earlier ["
                        << EarlierOff << ", "
                        << int64_t(EarlierOff + EarlierSize)
                        << ") Composite Later [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for an earlier store which writes to all the memory locations that
  // the later store writes to.
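  // (For instance, an earlier 8-byte store at offset 0 followed by a later
  // 2-byte store at offset 4 that rewrites just part of it.)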
  if (EnablePartialStoreMerging && LaterOff >= EarlierOff &&
      int64_t(EarlierOff + EarlierSize) > LaterOff &&
      uint64_t(LaterOff - EarlierOff) + LaterSize <= EarlierSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite an earlier load ["
                      << EarlierOff << ", "
                      << int64_t(EarlierOff + EarlierSize)
                      << ") by a later store [" << LaterOff << ", "
                      << int64_t(LaterOff + LaterSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the later store overwrites the end of the
  // earlier store.
  //
  //    |--earlier--|
  //            |--   later   --|
  //
  // In this case we may want to trim the size of earlier to avoid generating
  // writes to addresses which will definitely be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff > EarlierOff && LaterOff < int64_t(EarlierOff + EarlierSize) &&
       int64_t(LaterOff + LaterSize) >= int64_t(EarlierOff + EarlierSize)))
    return OW_End;

  // Finally, we also need to check if the later store overwrites the beginning
  // of the earlier store.
  //
  //        |--earlier--|
  //  |--   later   --|
  //
  // In this case we may want to move the destination address and trim the size
  // of earlier to avoid generating writes to addresses which will definitely
  // be overwritten later.
  if (!EnablePartialOverwriteTracking &&
      (LaterOff <= EarlierOff && int64_t(LaterOff + LaterSize) > EarlierOff)) {
    assert(int64_t(LaterOff + LaterSize) < int64_t(EarlierOff + EarlierSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
/// If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense. Consider this case:
///
///   memmove(A <- B)
///   memmove(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const MemoryLocation &InstStoreLoc,
                               Instruction *DepWrite,
                               const TargetLibraryInfo &TLI,
                               AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory. Get the
  // location read.
  MemoryLocation InstReadLoc = getLocForRead(Inst, TLI);
  if (!InstReadLoc.Ptr)
    return false; // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc))
    return false;

  if (isa<AnyMemCpyInst>(Inst)) {
    // LLVM's memcpy overlap semantics are not fully fleshed out (see PR11763)
    // but in practice memcpy(A <- B) either means that A and B are disjoint or
    // are equal (i.e. there are not partial overlaps). Given that, if we have:
    //
    //   memcpy/memmove(A <- B)  // DepWrite
    //   memcpy(A <- B)          // Inst
    //
    // with Inst reading/writing a size >= DepWrite's, we can reason as
    // follows:
    //
    //   - If A == B then both the copies are no-ops, so the DepWrite can be
    //     removed.
    //   - If A != B then A and B are disjoint locations in Inst. Since
    //     Inst.size >= DepWrite.size A and B are disjoint in DepWrite too.
    //     Therefore DepWrite can be removed.
    MemoryLocation DepReadLoc = getLocForRead(DepWrite, TLI);

    if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
      return false;
  }

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}
/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool memoryIsNotModifiedBetween(Instruction *FirstI,
                                       Instruction *SecondI,
                                       AliasAnalysis *AA) {
  SmallVector<BasicBlock *, 16> WorkList;
  SmallPtrSet<BasicBlock *, 8> Visited;
  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc = MemoryLocation::get(SecondI);

  // Start checking the store-block.
  WorkList.push_back(SecondBB);
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the load-block.
  while (!WorkList.empty()) {
    BasicBlock *B = WorkList.pop_back_val();

    // Ignore instructions before LI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA->getModRefInfo(I, MemLoc)))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SI must be dominated by LI");
      for (auto PredI = pred_begin(B), PE = pred_end(B); PredI != PE; ++PredI) {
        if (!Visited.insert(*PredI).second)
          continue;
        WorkList.push_back(*PredI);
      }
    }
  }
  return true;
}
/// Find all blocks that will unconditionally lead to the block BB and append
/// them to F.
static void findUnconditionalPreds(SmallVectorImpl<BasicBlock *> &Blocks,
                                   BasicBlock *BB, DominatorTree *DT) {
  for (pred_iterator I = pred_begin(BB), E = pred_end(BB); I != E; ++I) {
    BasicBlock *Pred = *I;
    if (Pred == BB) continue;
    Instruction *PredTI = Pred->getTerminator();
    if (PredTI->getNumSuccessors() != 1)
      continue;

    if (DT->isReachableFromEntry(Pred))
      Blocks.push_back(Pred);
  }
}
/// Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
static bool handleFree(CallInst *F, AliasAnalysis *AA,
                       MemoryDependenceResults *MD, DominatorTree *DT,
                       const TargetLibraryInfo *TLI,
                       InstOverlapIntervalsTy &IOL,
                       DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  MemoryLocation Loc = MemoryLocation(F->getOperand(0));
  SmallVector<BasicBlock *, 16> Blocks;
  Blocks.push_back(F->getParent());
  const DataLayout &DL = F->getModule()->getDataLayout();

  while (!Blocks.empty()) {
    BasicBlock *BB = Blocks.pop_back_val();
    Instruction *InstPt = BB->getTerminator();
    if (BB == F->getParent()) InstPt = F;

    MemDepResult Dep =
        MD->getPointerDependencyFrom(Loc, false, InstPt->getIterator(), BB);
    while (Dep.isDef() || Dep.isClobber()) {
      Instruction *Dependency = Dep.getInst();
      if (!hasAnalyzableMemoryWrite(Dependency, *TLI) ||
          !isRemovable(Dependency))
        break;

      Value *DepPointer =
          GetUnderlyingObject(getStoredPointerOperand(Dependency), DL);

      // Check for aliasing.
      if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
        break;

      LLVM_DEBUG(
          dbgs() << "DSE: Dead Store to soon to be freed memory:\n DEAD: "
                 << *Dependency << '\n');

      // DCE instructions only used to calculate that store.
      BasicBlock::iterator BBI(Dependency);
      deleteDeadInstruction(Dependency, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumFastStores;
      MadeChange = true;

      // Inst's old Dependency is now deleted. Compute the next dependency,
      // which may also be dead, as in
      //    s[0] = 0;
      //    s[1] = 0; // This has just been deleted.
      //    free(s);
      Dep = MD->getPointerDependencyFrom(Loc, false, BBI, BB);
    }

    if (Dep.isNonLocal())
      findUnconditionalPreds(Blocks, BB, DT);
  }

  return MadeChange;
}
/// Check to see if the specified location may alias any of the stack objects in
/// the DeadStackObjects set. If so, they become live because the location is
/// being loaded.
static void removeAccessedObjects(const MemoryLocation &LoadedLoc,
                                  SmallSetVector<Value *, 16> &DeadStackObjects,
                                  const DataLayout &DL, AliasAnalysis *AA,
                                  const TargetLibraryInfo *TLI,
                                  const Function *F) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr, DL);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.remove(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  // Remove objects that could alias LoadedLoc.
  DeadStackObjects.remove_if([&](Value *I) {
    // See if the loaded location could alias the stack location.
    MemoryLocation StackLoc(I, getPointerSize(I, DL, *TLI, F));
    return !AA->isNoAlias(StackLoc, LoadedLoc);
  });
}
/// Remove dead stores to stack-allocated locations in the function end block.
/// Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
static bool handleEndBlock(BasicBlock &BB, AliasAnalysis *AA,
                           MemoryDependenceResults *MD,
                           const TargetLibraryInfo *TLI,
                           InstOverlapIntervalsTy &IOL,
                           DenseMap<Instruction*, size_t> *InstrOrdering) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // function.
  SmallSetVector<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock &Entry = BB.getParent()->front();
  for (Instruction &I : Entry) {
    if (isa<AllocaInst>(&I))
      DeadStackObjects.insert(&I);

    // Okay, so these are dead heap objects, but if the pointer never escapes
    // then it's leaked by this function anyways.
    else if (isAllocLikeFn(&I, TLI) && !PointerMayBeCaptured(&I, true, true))
      DeadStackObjects.insert(&I);
  }

  // Treat byval or inalloca arguments the same, stores to them are dead at the
  // end of the function.
  for (Argument &AI : BB.getParent()->args())
    if (AI.hasByValOrInAllocaAttr())
      DeadStackObjects.insert(&AI);

  const DataLayout &DL = BB.getModule()->getDataLayout();
780 for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
781 --BBI;
783 // If we find a store, check to see if it points into a dead stack value.
784 if (hasAnalyzableMemoryWrite(&*BBI, *TLI) && isRemovable(&*BBI)) {
785 // See through pointer-to-pointer bitcasts
786 SmallVector<Value *, 4> Pointers;
787 GetUnderlyingObjects(getStoredPointerOperand(&*BBI), Pointers, DL);
789 // Stores to stack values are valid candidates for removal.
790 bool AllDead = true;
791 for (Value *Pointer : Pointers)
792 if (!DeadStackObjects.count(Pointer)) {
793 AllDead = false;
794 break;
797 if (AllDead) {
798 Instruction *Dead = &*BBI;
800 LLVM_DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n DEAD: "
801 << *Dead << "\n Objects: ";
802 for (SmallVectorImpl<Value *>::iterator I = Pointers.begin(),
803 E = Pointers.end();
804 I != E; ++I) {
805 dbgs() << **I;
806 if (std::next(I) != E)
807 dbgs() << ", ";
808 } dbgs()
809 << '\n');
811 // DCE instructions only used to calculate that store.
812 deleteDeadInstruction(Dead, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
813 ++NumFastStores;
814 MadeChange = true;
815 continue;
819 // Remove any dead non-memory-mutating instructions.
820 if (isInstructionTriviallyDead(&*BBI, TLI)) {
821 LLVM_DEBUG(dbgs() << "DSE: Removing trivially dead instruction:\n DEAD: "
822 << *&*BBI << '\n');
823 deleteDeadInstruction(&*BBI, &BBI, *MD, *TLI, IOL, InstrOrdering, &DeadStackObjects);
824 ++NumFastOther;
825 MadeChange = true;
826 continue;
    if (isa<AllocaInst>(BBI)) {
      // Remove allocas from the list of dead stack objects; there can't be
      // any references before the definition.
      DeadStackObjects.remove(&*BBI);
      continue;
    }

    if (auto *Call = dyn_cast<CallBase>(&*BBI)) {
      // Remove allocation function calls from the list of dead stack objects;
      // there can't be any references before the definition.
      if (isAllocLikeFn(&*BBI, TLI))
        DeadStackObjects.remove(&*BBI);

      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(Call))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      DeadStackObjects.remove_if([&](Value *I) {
        // See if the call site touches the value.
        return isRefSet(AA->getModRefInfo(
            Call, I, getPointerSize(I, DL, *TLI, BB.getParent())));
      });

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        break;

      continue;
    }

    // We can remove the dead stores, irrespective of the fence and its ordering
    // (release/acquire/seq_cst). Fences only constrain the ordering of already
    // visible stores; they do not make a store visible to other threads. So,
    // skipping over a fence does not change a store from being dead.
    if (isa<FenceInst>(*BBI))
      continue;
    MemoryLocation LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      if (!L->isUnordered()) // Be conservative with atomic/volatile load
        break;
      LoadedLoc = MemoryLocation::get(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = MemoryLocation::get(V);
    } else if (!BBI->mayReadFromMemory()) {
      // Instruction doesn't read memory. Note that stores that weren't removed
      // above will hit this case.
      continue;
    } else {
      // Unknown inst; assume it clobbers everything.
      break;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    removeAccessedObjects(LoadedLoc, DeadStackObjects, DL, AA, TLI, BB.getParent());

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
static bool tryToShorten(Instruction *EarlierWrite, int64_t &EarlierOffset,
                         int64_t &EarlierSize, int64_t LaterOffset,
                         int64_t LaterSize, bool IsOverwriteEnd) {
  // TODO: base this on the target vector size so that if the earlier store was
  // too small to get vector writes anyway, then it's likely a good idea to
  // shorten it. Power-of-2 vector writes are probably always a bad idea to
  // optimize, as any store/memset/memcpy is likely using vector instructions,
  // so shortening it to a non-vector size is likely to be slower.
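  // For example, with a destination alignment of 8, trimming at offset 16 is
  // acceptable to the check below (a power of two no smaller than the
  // alignment), whereas trimming at offset 13 would be rejected.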
  auto *EarlierIntrinsic = cast<AnyMemIntrinsic>(EarlierWrite);
  unsigned EarlierWriteAlign = EarlierIntrinsic->getDestAlignment();
  if (!IsOverwriteEnd)
    LaterOffset = int64_t(LaterOffset + LaterSize);

  if (!(isPowerOf2_64(LaterOffset) && EarlierWriteAlign <= LaterOffset) &&
      !((EarlierWriteAlign != 0) && LaterOffset % EarlierWriteAlign == 0))
    return false;

  int64_t NewLength = IsOverwriteEnd
                          ? LaterOffset - EarlierOffset
                          : EarlierSize - (LaterOffset - EarlierOffset);

  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(EarlierWrite)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewLength % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": "
                    << *EarlierWrite << "\n KILLER (offset " << LaterOffset
                    << ", " << EarlierSize << ")\n");

  Value *EarlierWriteLength = EarlierIntrinsic->getLength();
  Value *TrimmedLength =
      ConstantInt::get(EarlierWriteLength->getType(), NewLength);
  EarlierIntrinsic->setLength(TrimmedLength);

  EarlierSize = NewLength;
  if (!IsOverwriteEnd) {
    int64_t OffsetMoved = (LaterOffset - EarlierOffset);
    Value *Indices[1] = {
        ConstantInt::get(EarlierWriteLength->getType(), OffsetMoved)};
    GetElementPtrInst *NewDestGEP = GetElementPtrInst::CreateInBounds(
        EarlierIntrinsic->getRawDest()->getType()->getPointerElementType(),
        EarlierIntrinsic->getRawDest(), Indices, "", EarlierWrite);
    EarlierIntrinsic->setDest(NewDestGEP);
    EarlierOffset = EarlierOffset + OffsetMoved;
  }
  return true;
}
static bool tryToShortenEnd(Instruction *EarlierWrite,
                            OverlapIntervalsTy &IntervalMap,
                            int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart > EarlierStart && LaterStart < EarlierStart + EarlierSize &&
      LaterStart + LaterSize >= EarlierStart + EarlierSize) {
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static bool tryToShortenBegin(Instruction *EarlierWrite,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &EarlierStart, int64_t &EarlierSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(EarlierWrite))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t LaterStart = OII->second;
  int64_t LaterSize = OII->first - LaterStart;

  if (LaterStart <= EarlierStart && LaterStart + LaterSize > EarlierStart) {
    assert(LaterStart + LaterSize < EarlierStart + EarlierSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(EarlierWrite, EarlierStart, EarlierSize, LaterStart,
                     LaterSize, false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static bool removePartiallyOverlappedStores(AliasAnalysis *AA,
                                            const DataLayout &DL,
                                            InstOverlapIntervalsTy &IOL) {
  bool Changed = false;
  for (auto OI : IOL) {
    Instruction *EarlierWrite = OI.first;
    MemoryLocation Loc = getLocForWrite(EarlierWrite);
    assert(isRemovable(EarlierWrite) && "Expect only removable instruction");

    const Value *Ptr = Loc.Ptr->stripPointerCasts();
    int64_t EarlierStart = 0;
    int64_t EarlierSize = int64_t(Loc.Size.getValue());
    GetPointerBaseWithConstantOffset(Ptr, EarlierStart, DL);
    OverlapIntervalsTy &IntervalMap = OI.second;
    Changed |=
        tryToShortenEnd(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
    if (IntervalMap.empty())
      continue;
    Changed |=
        tryToShortenBegin(EarlierWrite, IntervalMap, EarlierStart, EarlierSize);
  }
  return Changed;
}
static bool eliminateNoopStore(Instruction *Inst, BasicBlock::iterator &BBI,
                               AliasAnalysis *AA, MemoryDependenceResults *MD,
                               const DataLayout &DL,
                               const TargetLibraryInfo *TLI,
                               InstOverlapIntervalsTy &IOL,
                               DenseMap<Instruction*, size_t> *InstrOrdering) {
  // Must be a store instruction.
  StoreInst *SI = dyn_cast<StoreInst>(Inst);
  if (!SI)
    return false;

  // If we're storing the same value back to a pointer that we just loaded from,
  // then the store can be removed.
  if (LoadInst *DepLoad = dyn_cast<LoadInst>(SI->getValueOperand())) {
    if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
        isRemovable(SI) && memoryIsNotModifiedBetween(DepLoad, SI, AA)) {

      LLVM_DEBUG(
          dbgs() << "DSE: Remove Store Of Load from same pointer:\n LOAD: "
                 << *DepLoad << "\n STORE: " << *SI << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }

  // Remove null stores into the calloc'ed objects.
  Constant *StoredConstant = dyn_cast<Constant>(SI->getValueOperand());
  if (StoredConstant && StoredConstant->isNullValue() && isRemovable(SI)) {
    Instruction *UnderlyingPointer =
        dyn_cast<Instruction>(GetUnderlyingObject(SI->getPointerOperand(), DL));

    if (UnderlyingPointer && isCallocLikeFn(UnderlyingPointer, TLI) &&
        memoryIsNotModifiedBetween(UnderlyingPointer, SI, AA)) {
      LLVM_DEBUG(
          dbgs() << "DSE: Remove null store to the calloc'ed object:\n DEAD: "
                 << *Inst << "\n OBJECT: " << *UnderlyingPointer << '\n');

      deleteDeadInstruction(SI, &BBI, *MD, *TLI, IOL, InstrOrdering);
      ++NumRedundantStores;
      return true;
    }
  }
  return false;
}
static bool eliminateDeadStores(BasicBlock &BB, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  const DataLayout &DL = BB.getModule()->getDataLayout();
  bool MadeChange = false;

  // FIXME: Maybe change this to use some abstraction like OrderedBasicBlock?
  // The current OrderedBasicBlock can't deal with mutation at the moment.
  size_t LastThrowingInstIndex = 0;
  DenseMap<Instruction*, size_t> InstrOrdering;
  size_t InstrIndex = 1;

  // A map of interval maps representing partially-overwritten value parts.
  InstOverlapIntervalsTy IOL;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(&*BBI, TLI)) {
      MadeChange |= handleFree(F, AA, MD, DT, TLI, IOL, &InstrOrdering);
      // Increment BBI after handleFree has potentially deleted instructions.
      // This ensures we maintain a valid iterator.
      ++BBI;
      continue;
    }

    Instruction *Inst = &*BBI++;

    size_t CurInstNumber = InstrIndex++;
    InstrOrdering.insert(std::make_pair(Inst, CurInstNumber));
    if (Inst->mayThrow()) {
      LastThrowingInstIndex = CurInstNumber;
      continue;
    }

    // Check to see if Inst writes to memory. If not, continue.
    if (!hasAnalyzableMemoryWrite(Inst, *TLI))
      continue;

    // eliminateNoopStore will update the iterator, if necessary.
    if (eliminateNoopStore(Inst, BBI, AA, MD, DL, TLI, IOL, &InstrOrdering)) {
      MadeChange = true;
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (!InstDep.isDef() && !InstDep.isClobber())
      continue;
    // Figure out what location is being stored to.
    MemoryLocation Loc = getLocForWrite(Inst);

    // If we didn't get a useful location, fail.
    if (!Loc.Ptr)
      continue;

    // Loop until we find a store we can eliminate or a load that
    // invalidates the analysis. Without an upper bound on the number of
    // instructions examined, this analysis can become very time-consuming.
    // However, the potential gain diminishes as we process more instructions
    // without eliminating any of them. Therefore, we limit the number of
    // instructions we look at.
    auto Limit = MD->getDefaultBlockScanLimit();
    while (InstDep.isDef() || InstDep.isClobber()) {
      // Get the memory clobbered by the instruction we depend on. MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with. If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out. However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      if (!hasAnalyzableMemoryWrite(DepWrite, *TLI))
        break;
      MemoryLocation DepLoc = getLocForWrite(DepWrite);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (!DepLoc.Ptr)
        break;

      // Make sure we don't look past a call which might throw. This is an
      // issue because MemoryDependenceAnalysis works in the wrong direction:
      // it finds instructions which dominate the current instruction, rather
      // than instructions which are post-dominated by the current instruction.
      //
      // If the underlying object is a non-escaping memory allocation, any store
      // to it is dead along the unwind edge. Otherwise, we need to preserve
      // the store.
      size_t DepIndex = InstrOrdering.lookup(DepWrite);
      assert(DepIndex && "Unexpected instruction");
      if (DepIndex <= LastThrowingInstIndex) {
        const Value* Underlying = GetUnderlyingObject(DepLoc.Ptr, DL);
        bool IsStoreDeadOnUnwind = isa<AllocaInst>(Underlying);
        if (!IsStoreDeadOnUnwind) {
          // We're looking for a call to an allocation function
          // where the allocation doesn't escape before the last
          // throwing instruction; PointerMayBeCaptured is a
          // reasonably fast approximation.
          IsStoreDeadOnUnwind = isAllocLikeFn(Underlying, TLI) &&
              !PointerMayBeCaptured(Underlying, false, true);
        }
        if (!IsStoreDeadOnUnwind)
          break;
      }
      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      // Also try to merge two stores if a later one only touches memory written
      // to by the earlier one.
      if (isRemovable(DepWrite) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *TLI, *AA)) {
        int64_t InstWriteOffset, DepWriteOffset;
        OverwriteResult OR = isOverwrite(Loc, DepLoc, DL, *TLI, DepWriteOffset,
                                         InstWriteOffset, DepWrite, IOL, *AA,
                                         BB.getParent());
        if (OR == OW_Complete) {
          LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n DEAD: " << *DepWrite
                            << "\n KILLER: " << *Inst << '\n');

          // Delete the store and now-dead instructions that feed it.
          deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL, &InstrOrdering);
          ++NumFastStores;
          MadeChange = true;

          // We erased DepWrite; start over.
          InstDep = MD->getDependency(Inst);
          continue;
        } else if ((OR == OW_End && isShortenableAtTheEnd(DepWrite)) ||
                   ((OR == OW_Begin &&
                     isShortenableAtTheBeginning(DepWrite)))) {
          assert(!EnablePartialOverwriteTracking && "Do not expect to perform "
                                                    "when partial-overwrite "
                                                    "tracking is enabled");
          // The overwrite result is known, so these must be known, too.
          int64_t EarlierSize = DepLoc.Size.getValue();
          int64_t LaterSize = Loc.Size.getValue();
          bool IsOverwriteEnd = (OR == OW_End);
          MadeChange |= tryToShorten(DepWrite, DepWriteOffset, EarlierSize,
                                     InstWriteOffset, LaterSize, IsOverwriteEnd);
        } else if (EnablePartialStoreMerging &&
                   OR == OW_PartialEarlierWithFullLater) {
          auto *Earlier = dyn_cast<StoreInst>(DepWrite);
          auto *Later = dyn_cast<StoreInst>(Inst);
          if (Earlier && isa<ConstantInt>(Earlier->getValueOperand()) &&
              Later && isa<ConstantInt>(Later->getValueOperand()) &&
              memoryIsNotModifiedBetween(Earlier, Later, AA)) {
            // If the store we find is:
            //  a) partially overwritten by the store to 'Loc'
            //  b) the later store is fully contained in the earlier one and
            //  c) they both have a constant value
            // Merge the two stores, replacing the earlier store's value with a
            // merge of both values.
            // TODO: Deal with other constant types (vectors, etc), and probably
            // some mem intrinsics (if needed)

            APInt EarlierValue =
                cast<ConstantInt>(Earlier->getValueOperand())->getValue();
            APInt LaterValue =
                cast<ConstantInt>(Later->getValueOperand())->getValue();
            unsigned LaterBits = LaterValue.getBitWidth();
            assert(EarlierValue.getBitWidth() > LaterValue.getBitWidth());
            LaterValue = LaterValue.zext(EarlierValue.getBitWidth());

            // Offset of the smaller store inside the larger store
            unsigned BitOffsetDiff = (InstWriteOffset - DepWriteOffset) * 8;
            unsigned LShiftAmount =
                DL.isBigEndian()
                    ? EarlierValue.getBitWidth() - BitOffsetDiff - LaterBits
                    : BitOffsetDiff;
            APInt Mask =
                APInt::getBitsSet(EarlierValue.getBitWidth(), LShiftAmount,
                                  LShiftAmount + LaterBits);
            // Clear the bits we'll be replacing, then OR with the smaller
            // store, shifted appropriately.
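            // For example (little-endian), an earlier i32 store of 0x11223344
            // overwritten by a later i8 store of 0xFF at byte offset 1 merges
            // to 0x1122FF44.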
            APInt Merged =
                (EarlierValue & ~Mask) | (LaterValue << LShiftAmount);
            LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n Earlier: " << *DepWrite
                              << "\n Later: " << *Inst
                              << "\n Merged Value: " << Merged << '\n');

            auto *SI = new StoreInst(
                ConstantInt::get(Earlier->getValueOperand()->getType(), Merged),
                Earlier->getPointerOperand(), false, Earlier->getAlignment(),
                Earlier->getOrdering(), Earlier->getSyncScopeID(), DepWrite);

            unsigned MDToKeep[] = {LLVMContext::MD_dbg, LLVMContext::MD_tbaa,
                                   LLVMContext::MD_alias_scope,
                                   LLVMContext::MD_noalias,
                                   LLVMContext::MD_nontemporal};
            SI->copyMetadata(*DepWrite, MDToKeep);
            ++NumModifiedStores;

            // Remove earlier, wider, store.
            size_t Idx = InstrOrdering.lookup(DepWrite);
            InstrOrdering.erase(DepWrite);
            InstrOrdering.insert(std::make_pair(SI, Idx));

            // Delete the old stores and now-dead instructions that feed them.
            deleteDeadInstruction(Inst, &BBI, *MD, *TLI, IOL, &InstrOrdering);
            deleteDeadInstruction(DepWrite, &BBI, *MD, *TLI, IOL,
                                  &InstrOrdering);
            MadeChange = true;

            // We erased DepWrite and Inst (Loc); start over.
            break;
          }
        }
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location. For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (isRefSet(AA->getModRefInfo(DepWrite, Loc)))
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, /*isLoad=*/ false,
                                             DepWrite->getIterator(), &BB,
                                             /*QueryInst=*/ nullptr, &Limit);
    }
  }

  if (EnablePartialOverwriteTracking)
    MadeChange |= removePartiallyOverlappedStores(AA, DL, IOL);

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB, AA, MD, TLI, IOL, &InstrOrdering);

  return MadeChange;
}
static bool eliminateDeadStores(Function &F, AliasAnalysis *AA,
                                MemoryDependenceResults *MD, DominatorTree *DT,
                                const TargetLibraryInfo *TLI) {
  bool MadeChange = false;
  for (BasicBlock &BB : F)
    // Only check non-dead blocks. Dead blocks may have strange pointer
    // cycles that will confuse alias analysis.
    if (DT->isReachableFromEntry(&BB))
      MadeChange |= eliminateDeadStores(BB, AA, MD, DT, TLI);

  return MadeChange;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis *AA = &AM.getResult<AAManager>(F);
  DominatorTree *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  MemoryDependenceResults *MD = &AM.getResult<MemoryDependenceAnalysis>(F);
  const TargetLibraryInfo *TLI = &AM.getResult<TargetLibraryAnalysis>(F);

  if (!eliminateDeadStores(F, AA, MD, DT, TLI))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<GlobalsAA>();
  PA.preserve<MemoryDependenceAnalysis>();
  return PA;
}
namespace {

/// A legacy pass for the legacy pass manager that wraps \c DSEPass.
class DSELegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  DSELegacyPass() : FunctionPass(ID) {
    initializeDSELegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override {
    if (skipFunction(F))
      return false;

    DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
    AliasAnalysis *AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
    MemoryDependenceResults *MD =
        &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
    const TargetLibraryInfo *TLI =
        &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

    return eliminateDeadStores(F, AA, MD, DT, TLI);
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace
char DSELegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(DSELegacyPass, "dse", "Dead Store Elimination", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(DSELegacyPass, "dse", "Dead Store Elimination", false,
                    false)

FunctionPass *llvm::createDeadStoreEliminationPass() {
  return new DSELegacyPass();
}