//===- DeadStoreElimination.cpp - MemorySSA Backed Dead Store Elimination -===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// The code below implements dead store elimination using MemorySSA. It uses
// the following general approach: given a MemoryDef, walk upwards to find
// clobbering MemoryDefs that may be killed by the starting def. Then check
// that there are no uses that may read the location of the original MemoryDef
// in between both MemoryDefs. A bit more concretely:
//
// For all MemoryDefs StartDef:
// 1. Get the next dominating clobbering MemoryDef (MaybeDeadAccess) by walking
//    upwards.
// 2. Check that there are no reads between MaybeDeadAccess and the StartDef by
//    checking all uses starting at MaybeDeadAccess and walking until we see
//    StartDef.
// 3. For each found CurrentDef, check that:
//   1. There are no barrier instructions between CurrentDef and StartDef (like
//      throws or stores with ordering constraints).
//   2. StartDef is executed whenever CurrentDef is executed.
//   3. StartDef completely overwrites CurrentDef.
// 4. Erase CurrentDef from the function and MemorySSA.
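//
// For example (illustrative): in
//   store i32 0, ptr %p
//   store i32 1, ptr %p
// with no reads of %p in between, the first store is a MemoryDef that is
// completely overwritten by the second one and can be erased.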
//
//===----------------------------------------------------------------------===//
#include "llvm/Transforms/Scalar/DeadStoreElimination.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/MapVector.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/Analysis/MemorySSAUpdater.h"
#include "llvm/Analysis/MustExecute.h"
#include "llvm/Analysis/PostDominators.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRangeList.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/DebugCounter.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "dse"
STATISTIC(NumRemainingStores, "Number of stores remaining after DSE");
STATISTIC(NumRedundantStores, "Number of redundant stores deleted");
STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther, "Number of other instrs removed");
STATISTIC(NumCompletePartials, "Number of stores dead by later partials");
STATISTIC(NumModifiedStores, "Number of stores modified");
STATISTIC(NumCFGChecks, "Number of stores modified");
STATISTIC(NumCFGTries, "Number of stores modified");
STATISTIC(NumCFGSuccess, "Number of stores modified");
STATISTIC(NumGetDomMemoryDefPassed,
          "Number of times a valid candidate is returned from getDomMemoryDef");
STATISTIC(NumDomMemDefChecks,
          "Number iterations check for reads in getDomMemoryDef");

DEBUG_COUNTER(MemorySSACounter, "dse-memoryssa",
              "Controls which MemoryDefs are eliminated.");
static cl::opt<bool>
    EnablePartialOverwriteTracking("enable-dse-partial-overwrite-tracking",
                                   cl::init(true), cl::Hidden,
                                   cl::desc("Enable partial-overwrite tracking in DSE"));

static cl::opt<bool>
    EnablePartialStoreMerging("enable-dse-partial-store-merging",
                              cl::init(true), cl::Hidden,
                              cl::desc("Enable partial store merging in DSE"));

static cl::opt<unsigned>
    MemorySSAScanLimit("dse-memoryssa-scanlimit", cl::init(150), cl::Hidden,
                       cl::desc("The number of memory instructions to scan for "
                                "dead store elimination (default = 150)"));

static cl::opt<unsigned> MemorySSAUpwardsStepLimit(
    "dse-memoryssa-walklimit", cl::init(90), cl::Hidden,
    cl::desc("The maximum number of steps while walking upwards to find "
             "MemoryDefs that may be killed (default = 90)"));

static cl::opt<unsigned> MemorySSAPartialStoreLimit(
    "dse-memoryssa-partial-store-limit", cl::init(5), cl::Hidden,
    cl::desc("The maximum number of candidates that only partially overwrite "
             "the killing MemoryDef to consider"));

static cl::opt<unsigned> MemorySSADefsPerBlockLimit(
    "dse-memoryssa-defs-per-block-limit", cl::init(5000), cl::Hidden,
    cl::desc("The number of MemoryDefs we consider as candidates to eliminate "
             "other stores per basic block (default = 5000)"));

static cl::opt<unsigned> MemorySSASameBBStepCost(
    "dse-memoryssa-samebb-cost", cl::init(1), cl::Hidden,
    cl::desc(
        "The cost of a step in the same basic block as the killing MemoryDef"));

static cl::opt<unsigned>
    MemorySSAOtherBBStepCost("dse-memoryssa-otherbb-cost", cl::init(5),
                             cl::Hidden,
                             cl::desc("The cost of a step in a different basic "
                                      "block than the killing MemoryDef"));

static cl::opt<unsigned> MemorySSAPathCheckLimit(
    "dse-memoryssa-path-check-limit", cl::init(50), cl::Hidden,
    cl::desc("The maximum number of blocks to check when trying to prove that "
             "all paths to an exit go through a killing block (default = 50)"));

// This flag allows or disallows DSE to optimize MemorySSA during its
// traversal. Note that DSE optimizing MemorySSA may impact other passes
// downstream of the DSE invocation and can lead to issues not being
// reproducible in isolation (i.e. when MemorySSA is built from scratch). In
// those cases, the flag can be used to check if DSE's MemorySSA optimizations
// impact follow-up passes.
static cl::opt<bool>
    OptimizeMemorySSA("dse-optimize-memoryssa", cl::init(true), cl::Hidden,
                      cl::desc("Allow DSE to optimize memory accesses."));

// TODO: turn on and remove this flag.
static cl::opt<bool> EnableInitializesImprovement(
    "enable-dse-initializes-attr-improvement", cl::init(false), cl::Hidden,
    cl::desc("Enable the initializes attr improvement in DSE"));
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//

using OverlapIntervalsTy = std::map<int64_t, int64_t>;
using InstOverlapIntervalsTy = DenseMap<Instruction *, OverlapIntervalsTy>;
/// Returns true if the end of this instruction can be safely shortened in
/// length.
static bool isShortenableAtTheEnd(Instruction *I) {
  // Don't shorten stores for now
  if (isa<StoreInst>(I))
    return false;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default: return false;
    case Intrinsic::memset:
    case Intrinsic::memcpy:
    case Intrinsic::memcpy_element_unordered_atomic:
    case Intrinsic::memset_element_unordered_atomic:
      // Do shorten memory intrinsics.
      // FIXME: Add memmove if it's also safe to transform.
      return true;
    }
  }

  // Don't shorten libcalls calls for now.
  return false;
}

/// Returns true if the beginning of this instruction can be safely shortened
/// in length.
static bool isShortenableAtTheBeginning(Instruction *I) {
  // FIXME: Handle only memset for now. Supporting memcpy/memmove should be
  // easily done by offsetting the source address.
  return isa<AnyMemSetInst>(I);
}
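
// For example (illustrative): a memset of 16 bytes whose last 8 bytes are later
// completely overwritten can be shortened to write only the first 8 bytes; a
// memset whose first bytes are overwritten can instead be shortened at the
// beginning by advancing its destination pointer.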
static std::optional<TypeSize> getPointerSize(const Value *V,
                                              const DataLayout &DL,
                                              const TargetLibraryInfo &TLI,
                                              const Function *F) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.NullIsUnknownSize = NullPointerIsDefined(F);

  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::getFixed(Size);
  return std::nullopt;
}

namespace {

enum OverwriteResult {
  OW_Begin,
  OW_Complete,
  OW_End,
  OW_PartialEarlierWithFullLater,
  OW_MaybePartial,
  OW_None,
  OW_Unknown
};

} // end anonymous namespace
/// Check if two instructions are masked stores that completely
/// overwrite one another. More specifically, \p KillingI has to
/// overwrite \p DeadI.
static OverwriteResult isMaskedStoreOverwrite(const Instruction *KillingI,
                                              const Instruction *DeadI,
                                              BatchAAResults &AA) {
  const auto *KillingII = dyn_cast<IntrinsicInst>(KillingI);
  const auto *DeadII = dyn_cast<IntrinsicInst>(DeadI);
  if (KillingII == nullptr || DeadII == nullptr)
    return OW_Unknown;
  if (KillingII->getIntrinsicID() != DeadII->getIntrinsicID())
    return OW_Unknown;
  if (KillingII->getIntrinsicID() == Intrinsic::masked_store) {
    VectorType *KillingTy =
        cast<VectorType>(KillingII->getArgOperand(0)->getType());
    VectorType *DeadTy = cast<VectorType>(DeadII->getArgOperand(0)->getType());
    if (KillingTy->getScalarSizeInBits() != DeadTy->getScalarSizeInBits())
      return OW_Unknown;
    if (KillingTy->getElementCount() != DeadTy->getElementCount())
      return OW_Unknown;
    Value *KillingPtr = KillingII->getArgOperand(1)->stripPointerCasts();
    Value *DeadPtr = DeadII->getArgOperand(1)->stripPointerCasts();
    if (KillingPtr != DeadPtr && !AA.isMustAlias(KillingPtr, DeadPtr))
      return OW_Unknown;
    // TODO: check that KillingII's mask is a superset of the DeadII's mask.
    if (KillingII->getArgOperand(3) != DeadII->getArgOperand(3))
      return OW_Unknown;
    return OW_Complete;
  }
  return OW_Unknown;
}
/// Return 'OW_Complete' if a store to the 'KillingLoc' location completely
/// overwrites a store to the 'DeadLoc' location, 'OW_End' if the end of the
/// 'DeadLoc' location is completely overwritten by 'KillingLoc', 'OW_Begin'
/// if the beginning of the 'DeadLoc' location is overwritten by 'KillingLoc'.
/// 'OW_PartialEarlierWithFullLater' means that a dead (big) store was
/// overwritten by a killing (smaller) store which doesn't write outside the big
/// store's memory locations. Returns 'OW_Unknown' if nothing can be determined.
/// NOTE: This function must only be called if both \p KillingLoc and \p
/// DeadLoc belong to the same underlying object with valid \p KillingOff and
/// \p DeadOff.
static OverwriteResult isPartialOverwrite(const MemoryLocation &KillingLoc,
                                          const MemoryLocation &DeadLoc,
                                          int64_t KillingOff, int64_t DeadOff,
                                          Instruction *DeadI,
                                          InstOverlapIntervalsTy &IOL) {
  const uint64_t KillingSize = KillingLoc.Size.getValue();
  const uint64_t DeadSize = DeadLoc.Size.getValue();
  // We may now overlap, although the overlap is not complete. There might also
  // be other incomplete overlaps, and together, they might cover the complete
  // dead store.
  // Note: The correctness of this logic depends on the fact that this function
  // is not even called providing DepWrite when there are any intervening reads.
  if (EnablePartialOverwriteTracking &&
      KillingOff < int64_t(DeadOff + DeadSize) &&
      int64_t(KillingOff + KillingSize) >= DeadOff) {

    // Insert our part of the overlap into the map.
    auto &IM = IOL[DeadI];
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite: DeadLoc [" << DeadOff << ", "
                      << int64_t(DeadOff + DeadSize) << ") KillingLoc ["
                      << KillingOff << ", " << int64_t(KillingOff + KillingSize)
                      << ")\n");

    // Make sure that we only insert non-overlapping intervals and combine
    // adjacent intervals. The intervals are stored in the map with the ending
    // offset as the key (in the half-open sense) and the starting offset as
    // the value.
    int64_t KillingIntStart = KillingOff;
    int64_t KillingIntEnd = KillingOff + KillingSize;

    // Find any intervals ending at, or after, KillingIntStart which start
    // before KillingIntEnd.
    auto ILI = IM.lower_bound(KillingIntStart);
    if (ILI != IM.end() && ILI->second <= KillingIntEnd) {
      // This existing interval is overlapped with the current store somewhere
      // in [KillingIntStart, KillingIntEnd]. Merge them by erasing the existing
      // intervals and adjusting our start and end.
      KillingIntStart = std::min(KillingIntStart, ILI->second);
      KillingIntEnd = std::max(KillingIntEnd, ILI->first);
      ILI = IM.erase(ILI);

      // Continue erasing and adjusting our end in case other previous
      // intervals are also overlapped with the current store.
      //
      // |--- dead 1 ---|  |--- dead 2 ---|
      //     |------- killing---------|
      //
      while (ILI != IM.end() && ILI->second <= KillingIntEnd) {
        assert(ILI->second > KillingIntStart && "Unexpected interval");
        KillingIntEnd = std::max(KillingIntEnd, ILI->first);
        ILI = IM.erase(ILI);
      }
    }

    IM[KillingIntEnd] = KillingIntStart;
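
    // Illustrative example: for a dead store covering [0, 8) that is later hit
    // by killing stores over [0, 3) and [2, 8), the first killing store leaves
    // IM = {3: 0}; the second one merges with that entry into IM = {8: 0},
    // which covers the whole dead store.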
    ILI = IM.begin();
    if (ILI->second <= DeadOff && ILI->first >= int64_t(DeadOff + DeadSize)) {
      LLVM_DEBUG(dbgs() << "DSE: Full overwrite from partials: DeadLoc ["
                        << DeadOff << ", " << int64_t(DeadOff + DeadSize)
                        << ") Composite KillingLoc [" << ILI->second << ", "
                        << ILI->first << ")\n");
      ++NumCompletePartials;
      return OW_Complete;
    }
  }

  // Check for a dead store which writes to all the memory locations that
  // the killing store writes to.
  if (EnablePartialStoreMerging && KillingOff >= DeadOff &&
      int64_t(DeadOff + DeadSize) > KillingOff &&
      uint64_t(KillingOff - DeadOff) + KillingSize <= DeadSize) {
    LLVM_DEBUG(dbgs() << "DSE: Partial overwrite a dead load [" << DeadOff
                      << ", " << int64_t(DeadOff + DeadSize)
                      << ") by a killing store [" << KillingOff << ", "
                      << int64_t(KillingOff + KillingSize) << ")\n");
    // TODO: Maybe come up with a better name?
    return OW_PartialEarlierWithFullLater;
  }

  // Another interesting case is if the killing store overwrites the end of the
  // dead store.
  // In this case we may want to trim the size of dead store to avoid
  // generating stores to addresses which will definitely be overwritten by the
  // killing store.
  if (!EnablePartialOverwriteTracking &&
      (KillingOff > DeadOff && KillingOff < int64_t(DeadOff + DeadSize) &&
       int64_t(KillingOff + KillingSize) >= int64_t(DeadOff + DeadSize)))
    return OW_End;

  // Finally, we also need to check if the killing store overwrites the
  // beginning of the dead store.
  // In this case we may want to move the destination address and trim the size
  // of dead store to avoid generating stores to addresses which will definitely
  // be overwritten by the killing store.
  if (!EnablePartialOverwriteTracking &&
      (KillingOff <= DeadOff && int64_t(KillingOff + KillingSize) > DeadOff)) {
    assert(int64_t(KillingOff + KillingSize) < int64_t(DeadOff + DeadSize) &&
           "Expect to be handled as OW_Complete");
    return OW_Begin;
  }
  // Otherwise, they don't completely overlap.
  return OW_Unknown;
}
/// Returns true if the memory which is accessed by the second instruction is not
/// modified between the first and the second instruction.
/// Precondition: Second instruction must be dominated by the first
/// instruction.
static bool
memoryIsNotModifiedBetween(Instruction *FirstI, Instruction *SecondI,
                           BatchAAResults &AA, const DataLayout &DL,
                           DominatorTree *DT) {
  // Do a backwards scan through the CFG from SecondI to FirstI. Look for
  // instructions which can modify the memory location accessed by SecondI.
  //
  // While doing the walk keep track of the address to check. It might be
  // different in different basic blocks due to PHI translation.
  using BlockAddressPair = std::pair<BasicBlock *, PHITransAddr>;
  SmallVector<BlockAddressPair, 16> WorkList;
  // Keep track of the address we visited each block with. Bail out if we
  // visit a block with different addresses.
  DenseMap<BasicBlock *, Value *> Visited;

  BasicBlock::iterator FirstBBI(FirstI);
  ++FirstBBI;
  BasicBlock::iterator SecondBBI(SecondI);
  BasicBlock *FirstBB = FirstI->getParent();
  BasicBlock *SecondBB = SecondI->getParent();
  MemoryLocation MemLoc;
  if (auto *MemSet = dyn_cast<MemSetInst>(SecondI))
    MemLoc = MemoryLocation::getForDest(MemSet);
  else
    MemLoc = MemoryLocation::get(SecondI);

  auto *MemLocPtr = const_cast<Value *>(MemLoc.Ptr);

  // Start checking the SecondBB.
  WorkList.push_back(
      std::make_pair(SecondBB, PHITransAddr(MemLocPtr, DL, nullptr)));
  bool isFirstBlock = true;

  // Check all blocks going backward until we reach the FirstBB.
  while (!WorkList.empty()) {
    BlockAddressPair Current = WorkList.pop_back_val();
    BasicBlock *B = Current.first;
    PHITransAddr &Addr = Current.second;
    Value *Ptr = Addr.getAddr();

    // Ignore instructions before FirstI if this is the FirstBB.
    BasicBlock::iterator BI = (B == FirstBB ? FirstBBI : B->begin());

    BasicBlock::iterator EI;
    if (isFirstBlock) {
      // Ignore instructions after SecondI if this is the first visit of SecondBB.
      assert(B == SecondBB && "first block is not the store block");
      EI = SecondBBI;
      isFirstBlock = false;
    } else {
      // It's not SecondBB or (in case of a loop) the second visit of SecondBB.
      // In this case we also have to look at instructions after SecondI.
      EI = B->end();
    }
    for (; BI != EI; ++BI) {
      Instruction *I = &*BI;
      if (I->mayWriteToMemory() && I != SecondI)
        if (isModSet(AA.getModRefInfo(I, MemLoc.getWithNewPtr(Ptr))))
          return false;
    }
    if (B != FirstBB) {
      assert(B != &FirstBB->getParent()->getEntryBlock() &&
             "Should not hit the entry block because SI must be dominated by LI");
      for (BasicBlock *Pred : predecessors(B)) {
        PHITransAddr PredAddr = Addr;
        if (PredAddr.needsPHITranslationFromBlock(B)) {
          if (!PredAddr.isPotentiallyPHITranslatable())
            return false;
          if (!PredAddr.translateValue(B, Pred, DT, false))
            return false;
        }
        Value *TranslatedPtr = PredAddr.getAddr();
        auto Inserted = Visited.insert(std::make_pair(Pred, TranslatedPtr));
        if (!Inserted.second) {
          // We already visited this block before. If it was with a different
          // address - bail out!
          if (TranslatedPtr != Inserted.first->second)
            return false;
          // ... otherwise just skip it.
          continue;
        }
        WorkList.push_back(std::make_pair(Pred, PredAddr));
      }
    }
  }
  return true;
}
static void shortenAssignment(Instruction *Inst, Value *OriginalDest,
                              uint64_t OldOffsetInBits, uint64_t OldSizeInBits,
                              uint64_t NewSizeInBits, bool IsOverwriteEnd) {
  const DataLayout &DL = Inst->getDataLayout();
  uint64_t DeadSliceSizeInBits = OldSizeInBits - NewSizeInBits;
  uint64_t DeadSliceOffsetInBits =
      OldOffsetInBits + (IsOverwriteEnd ? NewSizeInBits : 0);
  auto SetDeadFragExpr = [](auto *Assign,
                            DIExpression::FragmentInfo DeadFragment) {
    // createFragmentExpression expects an offset relative to the existing
    // fragment offset if there is one.
    uint64_t RelativeOffset = DeadFragment.OffsetInBits -
                              Assign->getExpression()
                                  ->getFragmentInfo()
                                  .value_or(DIExpression::FragmentInfo(0, 0))
                                  .OffsetInBits;
    if (auto NewExpr = DIExpression::createFragmentExpression(
            Assign->getExpression(), RelativeOffset, DeadFragment.SizeInBits)) {
      Assign->setExpression(*NewExpr);
      return;
    }
    // Failed to create a fragment expression for this so discard the value,
    // making this a kill location.
    auto *Expr = *DIExpression::createFragmentExpression(
        DIExpression::get(Assign->getContext(), {}), DeadFragment.OffsetInBits,
        DeadFragment.SizeInBits);
    Assign->setExpression(Expr);
    Assign->setKillLocation();
  };

  // A DIAssignID to use so that the inserted dbg.assign intrinsics do not
  // link to any instructions. Created in the loop below (once).
  DIAssignID *LinkToNothing = nullptr;
  LLVMContext &Ctx = Inst->getContext();
  auto GetDeadLink = [&Ctx, &LinkToNothing]() {
    if (!LinkToNothing)
      LinkToNothing = DIAssignID::getDistinct(Ctx);
    return LinkToNothing;
  };

  // Insert an unlinked dbg.assign intrinsic for the dead fragment after each
  // overlapping dbg.assign intrinsic. The loop invalidates the iterators
  // returned by getAssignmentMarkers so save a copy of the markers to iterate
  // over.
  auto LinkedRange = at::getAssignmentMarkers(Inst);
  SmallVector<DbgVariableRecord *> LinkedDVRAssigns =
      at::getDVRAssignmentMarkers(Inst);
  SmallVector<DbgAssignIntrinsic *> Linked(LinkedRange.begin(),
                                           LinkedRange.end());
  auto InsertAssignForOverlap = [&](auto *Assign) {
    std::optional<DIExpression::FragmentInfo> NewFragment;
    if (!at::calculateFragmentIntersect(DL, OriginalDest, DeadSliceOffsetInBits,
                                        DeadSliceSizeInBits, Assign,
                                        NewFragment)) {
      // We couldn't calculate the intersecting fragment for some reason. Be
      // cautious and unlink the whole assignment from the store.
      Assign->setKillAddress();
      Assign->setAssignId(GetDeadLink());
      return;
    }
    if (NewFragment->SizeInBits == 0)
      return;

    // Fragments overlap: insert a new dbg.assign for this dead part.
    auto *NewAssign = static_cast<decltype(Assign)>(Assign->clone());
    NewAssign->insertAfter(Assign);
    NewAssign->setAssignId(GetDeadLink());
    SetDeadFragExpr(NewAssign, *NewFragment);
    NewAssign->setKillAddress();
  };
  for_each(Linked, InsertAssignForOverlap);
  for_each(LinkedDVRAssigns, InsertAssignForOverlap);
}
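
// Illustrative example: shortening a 16-byte (128-bit) memset to its first 12
// bytes leaves a dead slice of 32 bits at bit offset 96; any dbg.assign
// overlapping that slice gets a new, unlinked dbg.assign describing just the
// dead fragment so the debug info still reflects the removed bytes.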
static bool tryToShorten(Instruction *DeadI, int64_t &DeadStart,
                         uint64_t &DeadSize, int64_t KillingStart,
                         uint64_t KillingSize, bool IsOverwriteEnd) {
  auto *DeadIntrinsic = cast<AnyMemIntrinsic>(DeadI);
  Align PrefAlign = DeadIntrinsic->getDestAlign().valueOrOne();

  // We assume that memset/memcpy operates in chunks of the "largest" native
  // type size and aligned on the same value. That means optimal start and size
  // of memset/memcpy should be modulo of preferred alignment of that type. That
  // is, there is no sense in trying to reduce the store size any further since
  // any "extra" stores come for free anyway.
  // On the other hand, the maximum alignment we can achieve is limited by the
  // alignment of the initial store.

  // TODO: Limit maximum alignment by preferred (or abi?) alignment of the
  // "largest" native type.
  // Note: What is the proper way to get that value?
  // Should TargetTransformInfo::getRegisterBitWidth be used or anything else?
  // PrefAlign = std::min(DL.getPrefTypeAlign(LargestType), PrefAlign);
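
  // Worked example (illustrative): a 17-byte memset with PrefAlign = 4 whose
  // last 10 bytes are overwritten (KillingStart = DeadStart + 7) is trimmed so
  // that 8 bytes remain rather than 7, keeping the remaining store size a
  // multiple of the preferred alignment.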
  int64_t ToRemoveStart = 0;
  uint64_t ToRemoveSize = 0;
  // Compute start and size of the region to remove. Make sure 'PrefAlign' is
  // maintained on the remaining store.
  if (IsOverwriteEnd) {
    // Calculate required adjustment for 'KillingStart' in order to keep
    // remaining store size aligned on 'PrefAlign'.
    uint64_t Off =
        offsetToAlignment(uint64_t(KillingStart - DeadStart), PrefAlign);
    ToRemoveStart = KillingStart + Off;
    if (DeadSize <= uint64_t(ToRemoveStart - DeadStart))
      return false;
    ToRemoveSize = DeadSize - uint64_t(ToRemoveStart - DeadStart);
  } else {
    ToRemoveStart = DeadStart;
    assert(KillingSize >= uint64_t(DeadStart - KillingStart) &&
           "Not overlapping accesses?");
    ToRemoveSize = KillingSize - uint64_t(DeadStart - KillingStart);
    // Calculate required adjustment for 'ToRemoveSize' in order to keep
    // start of the remaining store aligned on 'PrefAlign'.
    uint64_t Off = offsetToAlignment(ToRemoveSize, PrefAlign);
    if (Off != 0) {
      if (ToRemoveSize <= (PrefAlign.value() - Off))
        return false;
      ToRemoveSize -= PrefAlign.value() - Off;
    }
    assert(isAligned(PrefAlign, ToRemoveSize) &&
           "Should preserve selected alignment");
  }

  assert(ToRemoveSize > 0 && "Shouldn't reach here if nothing to remove");
  assert(DeadSize > ToRemoveSize && "Can't remove more than original size");

  uint64_t NewSize = DeadSize - ToRemoveSize;
  if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(DeadI)) {
    // When shortening an atomic memory intrinsic, the newly shortened
    // length must remain an integer multiple of the element size.
    const uint32_t ElementSize = AMI->getElementSizeInBytes();
    if (0 != NewSize % ElementSize)
      return false;
  }

  LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  OW "
                    << (IsOverwriteEnd ? "END" : "BEGIN") << ": " << *DeadI
                    << "\n  KILLER [" << ToRemoveStart << ", "
                    << int64_t(ToRemoveStart + ToRemoveSize) << ")\n");

  Value *DeadWriteLength = DeadIntrinsic->getLength();
  Value *TrimmedLength = ConstantInt::get(DeadWriteLength->getType(), NewSize);
  DeadIntrinsic->setLength(TrimmedLength);
  DeadIntrinsic->setDestAlignment(PrefAlign);

  Value *OrigDest = DeadIntrinsic->getRawDest();
  if (!IsOverwriteEnd) {
    Value *Indices[1] = {
        ConstantInt::get(DeadWriteLength->getType(), ToRemoveSize)};
    Instruction *NewDestGEP = GetElementPtrInst::CreateInBounds(
        Type::getInt8Ty(DeadIntrinsic->getContext()), OrigDest, Indices, "",
        DeadI->getIterator());
    NewDestGEP->setDebugLoc(DeadIntrinsic->getDebugLoc());
    DeadIntrinsic->setDest(NewDestGEP);
  }

  // Update attached dbg.assign intrinsics. Assume 8-bit byte.
  shortenAssignment(DeadI, OrigDest, DeadStart * 8, DeadSize * 8, NewSize * 8,
                    IsOverwriteEnd);

  // Finally update start and size of dead access.
  if (!IsOverwriteEnd)
    DeadStart += ToRemoveSize;
  DeadSize = NewSize;

  return true;
}
static bool tryToShortenEnd(Instruction *DeadI, OverlapIntervalsTy &IntervalMap,
                            int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheEnd(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = --IntervalMap.end();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart > DeadStart &&
      // Note: "KillingStart - DeadStart" is known to be positive due to
      // preceding checks.
      (uint64_t)(KillingStart - DeadStart) < DeadSize &&
      // Note: "DeadSize - (uint64_t)(KillingStart - DeadStart)" is known to
      // be non negative due to preceding checks.
      KillingSize >= DeadSize - (uint64_t)(KillingStart - DeadStart)) {
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     true)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}

static bool tryToShortenBegin(Instruction *DeadI,
                              OverlapIntervalsTy &IntervalMap,
                              int64_t &DeadStart, uint64_t &DeadSize) {
  if (IntervalMap.empty() || !isShortenableAtTheBeginning(DeadI))
    return false;

  OverlapIntervalsTy::iterator OII = IntervalMap.begin();
  int64_t KillingStart = OII->second;
  uint64_t KillingSize = OII->first - KillingStart;

  assert(OII->first - KillingStart >= 0 && "Size expected to be positive");

  if (KillingStart <= DeadStart &&
      // Note: "DeadStart - KillingStart" is known to be non negative due to
      // preceding checks.
      KillingSize > (uint64_t)(DeadStart - KillingStart)) {
    // Note: "KillingSize - (uint64_t)(DeadStart - KillingStart)" is known to
    // be positive due to preceding checks.
    assert(KillingSize - (uint64_t)(DeadStart - KillingStart) < DeadSize &&
           "Should have been handled as OW_Complete");
    if (tryToShorten(DeadI, DeadStart, DeadSize, KillingStart, KillingSize,
                     false)) {
      IntervalMap.erase(OII);
      return true;
    }
  }
  return false;
}
static Constant *
tryToMergePartialOverlappingStores(StoreInst *KillingI, StoreInst *DeadI,
                                   int64_t KillingOffset, int64_t DeadOffset,
                                   const DataLayout &DL, BatchAAResults &AA,
                                   DominatorTree *DT) {
  if (DeadI && isa<ConstantInt>(DeadI->getValueOperand()) &&
      DL.typeSizeEqualsStoreSize(DeadI->getValueOperand()->getType()) &&
      KillingI && isa<ConstantInt>(KillingI->getValueOperand()) &&
      DL.typeSizeEqualsStoreSize(KillingI->getValueOperand()->getType()) &&
      memoryIsNotModifiedBetween(DeadI, KillingI, AA, DL, DT)) {
    // If the store we find is:
    //   a) partially overwritten by the store to 'Loc'
    //   b) the killing store is fully contained in the dead one and
    //   c) they both have a constant value
    //   d) none of the two stores need padding
    // Merge the two stores, replacing the dead store's value with a
    // merge of both values.
    // TODO: Deal with other constant types (vectors, etc), and probably
    // some mem intrinsics (if needed)

    APInt DeadValue = cast<ConstantInt>(DeadI->getValueOperand())->getValue();
    APInt KillingValue =
        cast<ConstantInt>(KillingI->getValueOperand())->getValue();
    unsigned KillingBits = KillingValue.getBitWidth();
    assert(DeadValue.getBitWidth() > KillingValue.getBitWidth());
    KillingValue = KillingValue.zext(DeadValue.getBitWidth());

    // Offset of the smaller store inside the larger store
    unsigned BitOffsetDiff = (KillingOffset - DeadOffset) * 8;
    unsigned LShiftAmount =
        DL.isBigEndian() ? DeadValue.getBitWidth() - BitOffsetDiff - KillingBits
                         : BitOffsetDiff;
    APInt Mask = APInt::getBitsSet(DeadValue.getBitWidth(), LShiftAmount,
                                   LShiftAmount + KillingBits);
    // Clear the bits we'll be replacing, then OR with the smaller
    // store, shifted appropriately.
    APInt Merged = (DeadValue & ~Mask) | (KillingValue << LShiftAmount);
    LLVM_DEBUG(dbgs() << "DSE: Merge Stores:\n  Dead: " << *DeadI
                      << "\n  Killing: " << *KillingI
                      << "\n  Merged Value: " << Merged << '\n');
    return ConstantInt::get(DeadI->getValueOperand()->getType(), Merged);
  }
  return nullptr;
}
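
// Illustrative example (little-endian): a dead `store i32 0, ptr %p` followed
// by a killing `store i8 -1` at byte offset 1 into %p merges into a single
// constant 0xFF00 for the dead store (shift amount 8, mask over bits [8, 16)),
// after which the killing store can be removed.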
// Returns true if \p I is an intrinsic that does not read or write memory.
bool isNoopIntrinsic(Instruction *I) {
  if (const IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
    case Intrinsic::invariant_end:
    case Intrinsic::launder_invariant_group:
    case Intrinsic::assume:
      return true;
    case Intrinsic::dbg_declare:
    case Intrinsic::dbg_label:
    case Intrinsic::dbg_value:
      llvm_unreachable("Intrinsic should not be modeled in MemorySSA");
    default:
      return false;
    }
  }
  return false;
}

// Check if we can ignore \p D for DSE.
bool canSkipDef(MemoryDef *D, bool DefVisibleToCaller) {
  Instruction *DI = D->getMemoryInst();
  // Calls that only access inaccessible memory cannot read or write any memory
  // locations we consider for elimination.
  if (auto *CB = dyn_cast<CallBase>(DI))
    if (CB->onlyAccessesInaccessibleMemory())
      return true;

  // We can eliminate stores to locations not visible to the caller across
  // throwing instructions.
  if (DI->mayThrow() && !DefVisibleToCaller)
    return true;

  // We can remove the dead stores, irrespective of the fence and its ordering
  // (release/acquire/seq_cst). Fences only constrain the ordering of
  // already visible stores; they do not make a store visible to other
  // threads. So, skipping over a fence does not change a store from being
  // dead.
  if (isa<FenceInst>(DI))
    return true;

  // Skip intrinsics that do not really read or modify memory.
  if (isNoopIntrinsic(DI))
    return true;

  return false;
}
// A memory location wrapper that represents a MemoryLocation, `MemLoc`,
// defined by `MemDef`.
struct MemoryLocationWrapper {
  MemoryLocationWrapper(MemoryLocation MemLoc, MemoryDef *MemDef,
                        bool DefByInitializesAttr)
      : MemLoc(MemLoc), MemDef(MemDef),
        DefByInitializesAttr(DefByInitializesAttr) {
    assert(MemLoc.Ptr && "MemLoc should be not null");
    UnderlyingObject = getUnderlyingObject(MemLoc.Ptr);
    DefInst = MemDef->getMemoryInst();
  }

  MemoryLocation MemLoc;
  const Value *UnderlyingObject;
  MemoryDef *MemDef;
  Instruction *DefInst;
  bool DefByInitializesAttr = false;
};

// A memory def wrapper that represents a MemoryDef and the MemoryLocation(s)
// defined by this MemoryDef.
struct MemoryDefWrapper {
  MemoryDefWrapper(MemoryDef *MemDef,
                   ArrayRef<std::pair<MemoryLocation, bool>> MemLocations) {
    DefInst = MemDef->getMemoryInst();
    for (auto &[MemLoc, DefByInitializesAttr] : MemLocations)
      DefinedLocations.push_back(
          MemoryLocationWrapper(MemLoc, MemDef, DefByInitializesAttr));
  }
  Instruction *DefInst;
  SmallVector<MemoryLocationWrapper, 1> DefinedLocations;
};

bool hasInitializesAttr(Instruction *I) {
  CallBase *CB = dyn_cast<CallBase>(I);
  return CB && CB->getArgOperandWithAttribute(Attribute::Initializes);
}

struct ArgumentInitInfo {
  bool IsDeadOrInvisibleOnUnwind;
  ConstantRangeList Inits;
};
// Return the intersected range list of the initializes attributes of "Args".
// "Args" are call arguments that alias to each other.
// If any argument in "Args" doesn't have dead_on_unwind attr and
// "CallHasNoUnwindAttr" is false, return empty.
ConstantRangeList getIntersectedInitRangeList(ArrayRef<ArgumentInitInfo> Args,
                                              bool CallHasNoUnwindAttr) {
  if (Args.empty())
    return {};

  // To address unwind, the function should have nounwind attribute or the
  // arguments have dead or invisible on unwind. Otherwise, return empty.
  for (const auto &Arg : Args) {
    if (!CallHasNoUnwindAttr && !Arg.IsDeadOrInvisibleOnUnwind)
      return {};
    if (Arg.Inits.empty())
      return {};
  }

  ConstantRangeList IntersectedIntervals = Args.front().Inits;
  for (auto &Arg : Args.drop_front())
    IntersectedIntervals = IntersectedIntervals.intersectWith(Arg.Inits);

  return IntersectedIntervals;
}
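
// Illustrative example: if one call argument is initialized over [0, 16) and an
// aliasing argument over [8, 24), the intersected range list is [8, 16); only
// earlier stores that write exclusively bytes inside that range can be treated
// as killed by the call.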
struct DSEState {
  Function &F;
  AliasAnalysis &AA;
  EarliestEscapeAnalysis EA;

  /// The single BatchAA instance that is used to cache AA queries. It will
  /// not be invalidated over the whole run. This is safe, because:
  /// 1. Only memory writes are removed, so the alias cache for memory
  ///    locations remains valid.
  /// 2. No new instructions are added (only instructions removed), so cached
  ///    information for a deleted value cannot be accessed by a re-used new
  ///    value pointer.
  BatchAAResults BatchAA;

  MemorySSA &MSSA;
  DominatorTree &DT;
  PostDominatorTree &PDT;
  const TargetLibraryInfo &TLI;
  const DataLayout &DL;
  const LoopInfo &LI;

  // Whether the function contains any irreducible control flow, useful for
  // being accurately able to detect loops.
  bool ContainsIrreducibleLoops;

  // All MemoryDefs that potentially could kill other MemDefs.
  SmallVector<MemoryDef *, 64> MemDefs;
  // Any that should be skipped as they are already deleted
  SmallPtrSet<MemoryAccess *, 4> SkipStores;
  // Keep track whether a given object is captured before return or not.
  DenseMap<const Value *, bool> CapturedBeforeReturn;
  // Keep track of all of the objects that are invisible to the caller after
  // the function returns.
  DenseMap<const Value *, bool> InvisibleToCallerAfterRet;
  // Keep track of blocks with throwing instructions not modeled in MemorySSA.
  SmallPtrSet<BasicBlock *, 16> ThrowingBlocks;
  // Post-order numbers for each basic block. Used to figure out if memory
  // accesses are executed before another access.
  DenseMap<BasicBlock *, unsigned> PostOrderNumbers;

  /// Keep track of instructions (partly) overlapping with killing MemoryDefs per
  /// basic block.
  MapVector<BasicBlock *, InstOverlapIntervalsTy> IOLs;
  // Check if there are root nodes that are terminated by UnreachableInst.
  // Those roots pessimize post-dominance queries. If there are such roots,
  // fall back to CFG scan starting from all non-unreachable roots.
  bool AnyUnreachableExit;

  // Whether or not we should iterate on removing dead stores at the end of the
  // function due to removing a store causing a previously captured pointer to
  // no longer be captured.
  bool ShouldIterateEndOfFunctionDSE;

  /// Dead instructions to be removed at the end of DSE.
  SmallVector<Instruction *> ToRemove;

  // Class contains self-reference, make sure it's not copied/moved.
  DSEState(const DSEState &) = delete;
  DSEState &operator=(const DSEState &) = delete;

  DSEState(Function &F, AliasAnalysis &AA, MemorySSA &MSSA, DominatorTree &DT,
           PostDominatorTree &PDT, const TargetLibraryInfo &TLI,
           const LoopInfo &LI)
      : F(F), AA(AA), EA(DT, &LI), BatchAA(AA, &EA), MSSA(MSSA), DT(DT),
        PDT(PDT), TLI(TLI), DL(F.getDataLayout()), LI(LI) {
    // Collect blocks with throwing instructions not modeled in MemorySSA and
    // alloc-like objects.
    unsigned PO = 0;
    for (BasicBlock *BB : post_order(&F)) {
      PostOrderNumbers[BB] = PO++;
      for (Instruction &I : *BB) {
        MemoryAccess *MA = MSSA.getMemoryAccess(&I);
        if (I.mayThrow() && !MA)
          ThrowingBlocks.insert(I.getParent());

        auto *MD = dyn_cast_or_null<MemoryDef>(MA);
        if (MD && MemDefs.size() < MemorySSADefsPerBlockLimit &&
            (getLocForWrite(&I) || isMemTerminatorInst(&I) ||
             (EnableInitializesImprovement && hasInitializesAttr(&I))))
          MemDefs.push_back(MD);
      }
    }

    // Treat byval or inalloca arguments the same as Allocas, stores to them are
    // dead at the end of the function.
    for (Argument &AI : F.args())
      if (AI.hasPassPointeeByValueCopyAttr())
        InvisibleToCallerAfterRet.insert({&AI, true});

    // Collect whether there is any irreducible control flow in the function.
    ContainsIrreducibleLoops = mayContainIrreducibleControl(F, &LI);

    AnyUnreachableExit = any_of(PDT.roots(), [](const BasicBlock *E) {
      return isa<UnreachableInst>(E->getTerminator());
    });
  }
  static void pushMemUses(MemoryAccess *Acc,
                          SmallVectorImpl<MemoryAccess *> &WorkList,
                          SmallPtrSetImpl<MemoryAccess *> &Visited) {
    for (Use &U : Acc->uses()) {
      auto *MA = cast<MemoryAccess>(U.getUser());
      if (Visited.insert(MA).second)
        WorkList.push_back(MA);
    }
  }
  LocationSize strengthenLocationSize(const Instruction *I,
                                      LocationSize Size) const {
    if (auto *CB = dyn_cast<CallBase>(I)) {
      LibFunc F;
      if (TLI.getLibFunc(*CB, F) && TLI.has(F) &&
          (F == LibFunc_memset_chk || F == LibFunc_memcpy_chk)) {
        // Use the precise location size specified by the 3rd argument
        // for determining KillingI overwrites DeadLoc if it is a memset_chk
        // instruction. memset_chk will write either the amount specified as 3rd
        // argument or the function will immediately abort and exit the program.
        // NOTE: AA may determine NoAlias if it can prove that the access size
        // is larger than the allocation size due to that being UB. To avoid
        // returning potentially invalid NoAlias results by AA, limit the use of
        // the precise location size to isOverwrite.
        if (const auto *Len = dyn_cast<ConstantInt>(CB->getArgOperand(2)))
          return LocationSize::precise(Len->getZExtValue());
      }
    }
    return Size;
  }
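
  // Illustrative example: for a `__memset_chk(p, 0, 16, 32)` call, the 3rd
  // argument yields a precise write size of 16 bytes; isOverwrite can then use
  // that precise size even though the generic call location is imprecise.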
  /// Return 'OW_Complete' if a store to the 'KillingLoc' location (by \p
  /// KillingI instruction) completely overwrites a store to the 'DeadLoc'
  /// location (by \p DeadI instruction).
  /// Return OW_MaybePartial if \p KillingI does not completely overwrite
  /// \p DeadI, but they both write to the same underlying object. In that
  /// case, use isPartialOverwrite to check if \p KillingI partially overwrites
  /// \p DeadI. Returns 'OW_None' if \p KillingI is known to not overwrite the
  /// \p DeadI. Returns 'OW_Unknown' if nothing can be determined.
  OverwriteResult isOverwrite(const Instruction *KillingI,
                              const Instruction *DeadI,
                              const MemoryLocation &KillingLoc,
                              const MemoryLocation &DeadLoc,
                              int64_t &KillingOff, int64_t &DeadOff) {
    // AliasAnalysis does not always account for loops. Limit overwrite checks
    // to dependencies for which we can guarantee they are independent of any
    // loops they are in.
    if (!isGuaranteedLoopIndependent(DeadI, KillingI, DeadLoc))
      return OW_Unknown;

    LocationSize KillingLocSize =
        strengthenLocationSize(KillingI, KillingLoc.Size);
    const Value *DeadPtr = DeadLoc.Ptr->stripPointerCasts();
    const Value *KillingPtr = KillingLoc.Ptr->stripPointerCasts();
    const Value *DeadUndObj = getUnderlyingObject(DeadPtr);
    const Value *KillingUndObj = getUnderlyingObject(KillingPtr);

    // Check whether the killing store overwrites the whole object, in which
    // case the size/offset of the dead store does not matter.
    if (DeadUndObj == KillingUndObj && KillingLocSize.isPrecise() &&
        isIdentifiedObject(KillingUndObj)) {
      std::optional<TypeSize> KillingUndObjSize =
          getPointerSize(KillingUndObj, DL, TLI, &F);
      if (KillingUndObjSize && *KillingUndObjSize == KillingLocSize.getValue())
        return OW_Complete;
    }

    // FIXME: Vet that this works for size upper-bounds. Seems unlikely that we'll
    // get imprecise values here, though (except for unknown sizes).
    if (!KillingLocSize.isPrecise() || !DeadLoc.Size.isPrecise()) {
      // In case no constant size is known, try to use the IR values for the
      // number of bytes written and check if they match.
      const auto *KillingMemI = dyn_cast<MemIntrinsic>(KillingI);
      const auto *DeadMemI = dyn_cast<MemIntrinsic>(DeadI);
      if (KillingMemI && DeadMemI) {
        const Value *KillingV = KillingMemI->getLength();
        const Value *DeadV = DeadMemI->getLength();
        if (KillingV == DeadV && BatchAA.isMustAlias(DeadLoc, KillingLoc))
          return OW_Complete;
      }

      // Masked stores have imprecise locations, but we can reason about them
      // to some extent.
      return isMaskedStoreOverwrite(KillingI, DeadI, BatchAA);
    }

    const TypeSize KillingSize = KillingLocSize.getValue();
    const TypeSize DeadSize = DeadLoc.Size.getValue();
    // Bail on doing Size comparison which depends on AA for now
    // TODO: Remove AnyScalable once Alias Analysis deals with scalable vectors
    const bool AnyScalable =
        DeadSize.isScalable() || KillingLocSize.isScalable();
    if (AnyScalable)
      return OW_Unknown;

    // Query the alias information
    AliasResult AAR = BatchAA.alias(KillingLoc, DeadLoc);

    // If the start pointers are the same, we just have to compare sizes to see if
    // the killing store was larger than the dead store.
    if (AAR == AliasResult::MustAlias) {
      // Make sure that the KillingSize size is >= the DeadSize size.
      if (KillingSize >= DeadSize)
        return OW_Complete;
    }

    // If we hit a partial alias we may have a full overwrite
    if (AAR == AliasResult::PartialAlias && AAR.hasOffset()) {
      int32_t Off = AAR.getOffset();
      if (Off >= 0 && (uint64_t)Off + DeadSize <= KillingSize)
        return OW_Complete;
    }

    // If we can't resolve the same pointers to the same object, then we can't
    // analyze them at all.
    if (DeadUndObj != KillingUndObj) {
      // Non aliasing stores to different objects don't overlap. Note that
      // if the killing store is known to overwrite whole object (out of
      // bounds access overwrites whole object as well) then it is assumed to
      // completely overwrite any store to the same object even if they don't
      // actually alias (see next check).
      if (AAR == AliasResult::NoAlias)
        return OW_None;
      return OW_Unknown;
    }

    // Okay, we have stores to two completely different pointers. Try to
    // decompose the pointer into a "base + constant_offset" form. If the base
    // pointers are equal, then we can reason about the two stores.
    DeadOff = 0;
    KillingOff = 0;
    const Value *DeadBasePtr =
        GetPointerBaseWithConstantOffset(DeadPtr, DeadOff, DL);
    const Value *KillingBasePtr =
        GetPointerBaseWithConstantOffset(KillingPtr, KillingOff, DL);

    // If the base pointers still differ, we have two completely different
    // stores.
    if (DeadBasePtr != KillingBasePtr)
      return OW_Unknown;

    // The killing access completely overlaps the dead store if and only if
    // both start and end of the dead one is "inside" the killing one:
    //    |<->|--dead--|<->|
    //    |-----killing------|
    // Accesses may overlap if and only if start of one of them is "inside"
    // another one:
    //    |<->|--dead--|<-------->|
    //    |-------killing--------|
    //           OR
    //    |-------dead-------|
    //    |<->|---killing---|<----->|
    //
    // We have to be careful here as *Off is signed while *.Size is unsigned.

    // Check if the dead access starts "not before" the killing one.
    if (DeadOff >= KillingOff) {
      // If the dead access ends "not after" the killing access then the
      // dead one is completely overwritten by the killing one.
      if (uint64_t(DeadOff - KillingOff) + DeadSize <= KillingSize)
        return OW_Complete;
      // If start of the dead access is "before" end of the killing access
      // then accesses overlap.
      else if ((uint64_t)(DeadOff - KillingOff) < KillingSize)
        return OW_MaybePartial;
    }
    // If start of the killing access is "before" end of the dead access then
    // accesses overlap.
    else if ((uint64_t)(KillingOff - DeadOff) < DeadSize) {
      return OW_MaybePartial;
    }

    // Can reach here only if accesses are known not to overlap.
    return OW_None;
  }
  bool isInvisibleToCallerAfterRet(const Value *V) {
    if (isa<AllocaInst>(V))
      return true;

    auto I = InvisibleToCallerAfterRet.insert({V, false});
    if (I.second) {
      if (!isInvisibleToCallerOnUnwind(V)) {
        I.first->second = false;
      } else if (isNoAliasCall(V)) {
        I.first->second = !PointerMayBeCaptured(V, true, false);
      }
    }
    return I.first->second;
  }

  bool isInvisibleToCallerOnUnwind(const Value *V) {
    bool RequiresNoCaptureBeforeUnwind;
    if (!isNotVisibleOnUnwind(V, RequiresNoCaptureBeforeUnwind))
      return false;
    if (!RequiresNoCaptureBeforeUnwind)
      return true;

    auto I = CapturedBeforeReturn.insert({V, true});
    if (I.second)
      // NOTE: This could be made more precise by PointerMayBeCapturedBefore
      // with the killing MemoryDef. But we refrain from doing so for now to
      // limit compile-time and this does not cause any changes to the number
      // of stores removed on a large test set in practice.
      I.first->second = PointerMayBeCaptured(V, false, true);
    return !I.first->second;
  }
  std::optional<MemoryLocation> getLocForWrite(Instruction *I) const {
    if (!I->mayWriteToMemory())
      return std::nullopt;

    if (auto *CB = dyn_cast<CallBase>(I))
      return MemoryLocation::getForDest(CB, TLI);

    return MemoryLocation::getOrNone(I);
  }

  // Returns a list of <MemoryLocation, bool> pairs written by I.
  // The bool means whether the write is from Initializes attr.
  SmallVector<std::pair<MemoryLocation, bool>, 1>
  getLocForInst(Instruction *I, bool ConsiderInitializesAttr) {
    SmallVector<std::pair<MemoryLocation, bool>, 1> Locations;
    if (isMemTerminatorInst(I)) {
      if (auto Loc = getLocForTerminator(I))
        Locations.push_back(std::make_pair(Loc->first, false));
      return Locations;
    }

    if (auto Loc = getLocForWrite(I))
      Locations.push_back(std::make_pair(*Loc, false));

    if (ConsiderInitializesAttr) {
      for (auto &MemLoc : getInitializesArgMemLoc(I)) {
        Locations.push_back(std::make_pair(MemLoc, true));
      }
    }
    return Locations;
  }
  /// Assuming this instruction has a dead analyzable write, can we delete
  /// this instruction?
  bool isRemovable(Instruction *I) {
    assert(getLocForWrite(I) && "Must have analyzable write");

    // Don't remove volatile/atomic stores.
    if (StoreInst *SI = dyn_cast<StoreInst>(I))
      return SI->isUnordered();

    if (auto *CB = dyn_cast<CallBase>(I)) {
      // Don't remove volatile memory intrinsics.
      if (auto *MI = dyn_cast<MemIntrinsic>(CB))
        return !MI->isVolatile();

      // Never remove dead lifetime intrinsics, e.g. because they are followed
      // by a free.
      if (CB->isLifetimeStartOrEnd())
        return false;

      return CB->use_empty() && CB->willReturn() && CB->doesNotThrow() &&
             !CB->isTerminator();
    }

    return false;
  }
  /// Returns true if \p UseInst completely overwrites \p DefLoc
  /// (stored by \p DefInst).
  bool isCompleteOverwrite(const MemoryLocation &DefLoc, Instruction *DefInst,
                           Instruction *UseInst) {
    // UseInst has a MemoryDef associated in MemorySSA. It's possible for a
    // MemoryDef to not write to memory, e.g. a volatile load is modeled as a
    // MemoryDef.
    if (!UseInst->mayWriteToMemory())
      return false;

    if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;

    int64_t InstWriteOffset, DepWriteOffset;
    if (auto CC = getLocForWrite(UseInst))
      return isOverwrite(UseInst, DefInst, *CC, DefLoc, InstWriteOffset,
                         DepWriteOffset) == OW_Complete;
    return false;
  }
  /// Returns true if \p Def is not read before returning from the function.
  bool isWriteAtEndOfFunction(MemoryDef *Def, const MemoryLocation &DefLoc) {
    LLVM_DEBUG(dbgs() << "  Check if def " << *Def << " ("
                      << *Def->getMemoryInst()
                      << ") is at the end of the function \n");
    SmallVector<MemoryAccess *, 4> WorkList;
    SmallPtrSet<MemoryAccess *, 8> Visited;

    pushMemUses(Def, WorkList, Visited);
    for (unsigned I = 0; I < WorkList.size(); I++) {
      if (WorkList.size() >= MemorySSAScanLimit) {
        LLVM_DEBUG(dbgs() << "  ... hit exploration limit.\n");
        return false;
      }

      MemoryAccess *UseAccess = WorkList[I];
      if (isa<MemoryPhi>(UseAccess)) {
        // AliasAnalysis does not account for loops. Limit elimination to
        // candidates for which we can guarantee they always store to the same
        // memory location.
        if (!isGuaranteedLoopInvariant(DefLoc.Ptr))
          return false;

        pushMemUses(cast<MemoryPhi>(UseAccess), WorkList, Visited);
        continue;
      }

      // TODO: Checking for aliasing is expensive. Consider reducing the amount
      // of times this is called and/or caching it.
      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
      if (isReadClobber(DefLoc, UseInst)) {
        LLVM_DEBUG(dbgs() << "  ... hit read clobber " << *UseInst << ".\n");
        return false;
      }

      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess))
        pushMemUses(UseDef, WorkList, Visited);
    }
    return true;
  }
  /// If \p I is a memory terminator like llvm.lifetime.end or free, return a
  /// pair with the MemoryLocation terminated by \p I and a boolean flag
  /// indicating whether \p I is a free-like call.
  std::optional<std::pair<MemoryLocation, bool>>
  getLocForTerminator(Instruction *I) const {
    uint64_t Len;
    Value *Ptr;
    if (match(I, m_Intrinsic<Intrinsic::lifetime_end>(m_ConstantInt(Len),
                                                      m_Value(Ptr))))
      return {std::make_pair(MemoryLocation(Ptr, Len), false)};

    if (auto *CB = dyn_cast<CallBase>(I)) {
      if (Value *FreedOp = getFreedOperand(CB, &TLI))
        return {std::make_pair(MemoryLocation::getAfter(FreedOp), true)};
    }

    return std::nullopt;
  }

  /// Returns true if \p I is a memory terminator instruction like
  /// llvm.lifetime.end or free.
  bool isMemTerminatorInst(Instruction *I) const {
    auto *CB = dyn_cast<CallBase>(I);
    return CB && (CB->getIntrinsicID() == Intrinsic::lifetime_end ||
                  getFreedOperand(CB, &TLI) != nullptr);
  }
  /// Returns true if \p MaybeTerm is a memory terminator for \p Loc from
  /// instruction \p AccessI.
  bool isMemTerminator(const MemoryLocation &Loc, Instruction *AccessI,
                       Instruction *MaybeTerm) {
    std::optional<std::pair<MemoryLocation, bool>> MaybeTermLoc =
        getLocForTerminator(MaybeTerm);

    if (!MaybeTermLoc)
      return false;

    // If the terminator is a free-like call, all accesses to the underlying
    // object can be considered terminated.
    if (getUnderlyingObject(Loc.Ptr) !=
        getUnderlyingObject(MaybeTermLoc->first.Ptr))
      return false;

    auto TermLoc = MaybeTermLoc->first;
    if (MaybeTermLoc->second) {
      const Value *LocUO = getUnderlyingObject(Loc.Ptr);
      return BatchAA.isMustAlias(TermLoc.Ptr, LocUO);
    }
    int64_t InstWriteOffset = 0;
    int64_t DepWriteOffset = 0;
    return isOverwrite(MaybeTerm, AccessI, TermLoc, Loc, InstWriteOffset,
                       DepWriteOffset) == OW_Complete;
  }
  // Returns true if \p Use may read from \p DefLoc.
  bool isReadClobber(const MemoryLocation &DefLoc, Instruction *UseInst) {
    if (isNoopIntrinsic(UseInst))
      return false;

    // Monotonic or weaker atomic stores can be re-ordered and do not need to be
    // treated as read clobber.
    if (auto SI = dyn_cast<StoreInst>(UseInst))
      return isStrongerThan(SI->getOrdering(), AtomicOrdering::Monotonic);

    if (!UseInst->mayReadFromMemory())
      return false;

    if (auto *CB = dyn_cast<CallBase>(UseInst))
      if (CB->onlyAccessesInaccessibleMemory())
        return false;

    return isRefSet(BatchAA.getModRefInfo(UseInst, DefLoc));
  }
  /// Returns true if a dependency between \p Current and \p KillingDef is
  /// guaranteed to be loop invariant for the loops that they are in. Either
  /// because they are known to be in the same block, in the same loop level or
  /// by guaranteeing that \p CurrentLoc only references a single MemoryLocation
  /// during execution of the containing function.
  bool isGuaranteedLoopIndependent(const Instruction *Current,
                                   const Instruction *KillingDef,
                                   const MemoryLocation &CurrentLoc) {
    // If the dependency is within the same block or loop level (being careful
    // of irreducible loops), we know that AA will return a valid result for the
    // memory dependency. (Both at the function level, outside of any loop,
    // would also be valid but we currently disable that to limit compile time).
    if (Current->getParent() == KillingDef->getParent())
      return true;
    const Loop *CurrentLI = LI.getLoopFor(Current->getParent());
    if (!ContainsIrreducibleLoops && CurrentLI &&
        CurrentLI == LI.getLoopFor(KillingDef->getParent()))
      return true;
    // Otherwise check the memory location is invariant to any loops.
    return isGuaranteedLoopInvariant(CurrentLoc.Ptr);
  }

  /// Returns true if \p Ptr is guaranteed to be loop invariant for any possible
  /// loop. In particular, this guarantees that it only references a single
  /// MemoryLocation during execution of the containing function.
  bool isGuaranteedLoopInvariant(const Value *Ptr) {
    Ptr = Ptr->stripPointerCasts();
    if (auto *GEP = dyn_cast<GEPOperator>(Ptr))
      if (GEP->hasAllConstantIndices())
        Ptr = GEP->getPointerOperand()->stripPointerCasts();

    if (auto *I = dyn_cast<Instruction>(Ptr)) {
      return I->getParent()->isEntryBlock() ||
             (!ContainsIrreducibleLoops && !LI.getLoopFor(I->getParent()));
    }
    return true;
  }
1418 // Find a MemoryDef writing to \p KillingLoc and dominating \p StartAccess,
1419 // with no read access between them or on any other path to a function exit
1420 // block if \p KillingLoc is not accessible after the function returns. If
1421 // there is no such MemoryDef, return std::nullopt. The returned value may not
1422 // (completely) overwrite \p KillingLoc. Currently we bail out when we
1423 // encounter an aliasing MemoryUse (read).
1424 std::optional
<MemoryAccess
*>
1425 getDomMemoryDef(MemoryDef
*KillingDef
, MemoryAccess
*StartAccess
,
1426 const MemoryLocation
&KillingLoc
, const Value
*KillingUndObj
,
1427 unsigned &ScanLimit
, unsigned &WalkerStepLimit
,
1428 bool IsMemTerm
, unsigned &PartialLimit
,
1429 bool IsInitializesAttrMemLoc
) {
1430 if (ScanLimit
== 0 || WalkerStepLimit
== 0) {
1431 LLVM_DEBUG(dbgs() << "\n ... hit scan limit\n");
1432 return std::nullopt
;
1435 MemoryAccess
*Current
= StartAccess
;
1436 Instruction
*KillingI
= KillingDef
->getMemoryInst();
1437 LLVM_DEBUG(dbgs() << " trying to get dominating access\n");
1439 // Only optimize defining access of KillingDef when directly starting at its
1440 // defining access. The defining access also must only access KillingLoc. At
1441 // the moment we only support instructions with a single write location, so
1442 // it should be sufficient to disable optimizations for instructions that
1443 // also read from memory.
1444 bool CanOptimize
= OptimizeMemorySSA
&&
1445 KillingDef
->getDefiningAccess() == StartAccess
&&
1446 !KillingI
->mayReadFromMemory();
1448 // Find the next clobbering Mod access for DefLoc, starting at StartAccess.
1449 std::optional
<MemoryLocation
> CurrentLoc
;
1450 for (;; Current
= cast
<MemoryDef
>(Current
)->getDefiningAccess()) {
1452 dbgs() << " visiting " << *Current
;
1453 if (!MSSA
.isLiveOnEntryDef(Current
) && isa
<MemoryUseOrDef
>(Current
))
1454 dbgs() << " (" << *cast
<MemoryUseOrDef
>(Current
)->getMemoryInst()
1460 if (MSSA
.isLiveOnEntryDef(Current
)) {
1461 LLVM_DEBUG(dbgs() << " ... found LiveOnEntryDef\n");
1462 if (CanOptimize
&& Current
!= KillingDef
->getDefiningAccess())
1463 // The first clobbering def is... none.
1464 KillingDef
->setOptimized(Current
);
1465 return std::nullopt
;
1468 // Cost of a step. Accesses in the same block are more likely to be valid
1469 // candidates for elimination, hence consider them cheaper.
1470 unsigned StepCost
= KillingDef
->getBlock() == Current
->getBlock()
1471 ? MemorySSASameBBStepCost
1472 : MemorySSAOtherBBStepCost
;
1473 if (WalkerStepLimit
<= StepCost
) {
1474 LLVM_DEBUG(dbgs() << " ... hit walker step limit\n");
1475 return std::nullopt
;
1477 WalkerStepLimit
-= StepCost
;
1479 // Return for MemoryPhis. They cannot be eliminated directly and the
1480 // caller is responsible for traversing them.
1481 if (isa
<MemoryPhi
>(Current
)) {
1482 LLVM_DEBUG(dbgs() << " ... found MemoryPhi\n");
1486 // Below, check if CurrentDef is a valid candidate to be eliminated by
1487 // KillingDef. If it is not, check the next candidate.
1488 MemoryDef
*CurrentDef
= cast
<MemoryDef
>(Current
);
1489 Instruction
*CurrentI
= CurrentDef
->getMemoryInst();
1491 if (canSkipDef(CurrentDef
, !isInvisibleToCallerOnUnwind(KillingUndObj
))) {
1492 CanOptimize
= false;
      // Before we try to remove anything, check for any extra throwing
      // instructions that block us from DSEing.
      if (mayThrowBetween(KillingI, CurrentI, KillingUndObj)) {
        LLVM_DEBUG(dbgs() << "  ... skip, may throw!\n");
        return std::nullopt;
      }
      // Check for anything that looks like it will be a barrier to further
      // removal.
      if (isDSEBarrier(KillingUndObj, CurrentI)) {
        LLVM_DEBUG(dbgs() << "  ... skip, barrier\n");
        return std::nullopt;
      }
      // If Current is known to be on a path that reads DefLoc or is a read
      // clobber, bail out, as the path is not profitable. We skip this check
      // for intrinsic calls, because the code knows how to handle memcpy
      // intrinsics.
      if (!isa<IntrinsicInst>(CurrentI) && isReadClobber(KillingLoc, CurrentI))
        return std::nullopt;
      // Quick check if there are direct uses that are read-clobbers.
      if (any_of(Current->uses(), [this, &KillingLoc, StartAccess](Use &U) {
            if (auto *UseOrDef = dyn_cast<MemoryUseOrDef>(U.getUser()))
              return !MSSA.dominates(StartAccess, UseOrDef) &&
                     isReadClobber(KillingLoc, UseOrDef->getMemoryInst());
            return false;
          })) {
        LLVM_DEBUG(dbgs() << "   ...  found a read clobber\n");
        return std::nullopt;
      }
      // If Current does not have an analyzable write location or is not
      // removable, skip it.
      CurrentLoc = getLocForWrite(CurrentI);
      if (!CurrentLoc || !isRemovable(CurrentI)) {
        CanOptimize = false;
        continue;
      }
      // AliasAnalysis does not account for loops. Limit elimination to
      // candidates for which we can guarantee they always store to the same
      // memory location and are not located in different loops.
      if (!isGuaranteedLoopIndependent(CurrentI, KillingI, *CurrentLoc)) {
        LLVM_DEBUG(dbgs() << "  ... not guaranteed loop independent\n");
        CanOptimize = false;
        continue;
      }
      if (IsMemTerm) {
        // If the killing def is a memory terminator (e.g. lifetime.end), check
        // the next candidate if Current does not write the same underlying
        // object as the terminator.
        if (!isMemTerminator(*CurrentLoc, CurrentI, KillingI)) {
          CanOptimize = false;
          continue;
        }
      } else {
        int64_t KillingOffset = 0;
        int64_t DeadOffset = 0;
        auto OR = isOverwrite(KillingI, CurrentI, KillingLoc, *CurrentLoc,
                              KillingOffset, DeadOffset);
        if (CanOptimize) {
          // CurrentDef is the earliest write clobber of KillingDef. Use it as
          // optimized access. Do not optimize if CurrentDef is already the
          // defining access of KillingDef.
          if (CurrentDef != KillingDef->getDefiningAccess() &&
              (OR == OW_Complete || OR == OW_MaybePartial))
            KillingDef->setOptimized(CurrentDef);

          // Once a may-aliasing def is encountered do not set an optimized
          // access.
          CanOptimize = false;
        }
        // If Current does not write to the same object as KillingDef, check
        // the next candidate.
        if (OR == OW_Unknown || OR == OW_None)
          continue;
        else if (OR == OW_MaybePartial) {
          // If KillingDef only partially overwrites Current, check the next
          // candidate if the partial step limit is exceeded. This aggressively
          // limits the number of candidates for partial store elimination,
          // which are less likely to be removable in the end.
          if (PartialLimit <= 1) {
            WalkerStepLimit -= 1;
            LLVM_DEBUG(dbgs() << "   ... reached partial limit ... continue "
                                 "with next access\n");
            continue;
          }
          PartialLimit -= 1;
        }
      }
      break;
    }
    // Accesses to objects accessible after the function returns can only be
    // eliminated if the access is dead along all paths to the exit. Collect
    // the blocks with killing (=completely overwriting MemoryDefs) and check if
    // they cover all paths from MaybeDeadAccess to any function exit.
    SmallPtrSet<Instruction *, 16> KillingDefs;
    KillingDefs.insert(KillingDef->getMemoryInst());
    MemoryAccess *MaybeDeadAccess = Current;
    MemoryLocation MaybeDeadLoc = *CurrentLoc;
    Instruction *MaybeDeadI = cast<MemoryDef>(MaybeDeadAccess)->getMemoryInst();
    LLVM_DEBUG(dbgs() << "  Checking for reads of " << *MaybeDeadAccess << " ("
                      << *MaybeDeadI << ")\n");

    SmallVector<MemoryAccess *, 32> WorkList;
    SmallPtrSet<MemoryAccess *, 32> Visited;
    pushMemUses(MaybeDeadAccess, WorkList, Visited);
    // Check if DeadDef may be read.
    for (unsigned I = 0; I < WorkList.size(); I++) {
      MemoryAccess *UseAccess = WorkList[I];

      LLVM_DEBUG(dbgs() << "   " << *UseAccess);
      // Bail out if the number of accesses to check exceeds the scan limit.
      if (ScanLimit < (WorkList.size() - I)) {
        LLVM_DEBUG(dbgs() << "\n    ...  hit scan limit\n");
        return std::nullopt;
      }
      --ScanLimit;
      NumDomMemDefChecks++;
      if (isa<MemoryPhi>(UseAccess)) {
        if (any_of(KillingDefs, [this, UseAccess](Instruction *KI) {
              return DT.properlyDominates(KI->getParent(),
                                          UseAccess->getBlock());
            })) {
          LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing block\n");
          continue;
        }
        LLVM_DEBUG(dbgs() << "\n    ... adding PHI uses\n");
        pushMemUses(UseAccess, WorkList, Visited);
        continue;
      }
      Instruction *UseInst = cast<MemoryUseOrDef>(UseAccess)->getMemoryInst();
      LLVM_DEBUG(dbgs() << " (" << *UseInst << ")\n");
      if (any_of(KillingDefs, [this, UseInst](Instruction *KI) {
            return DT.dominates(KI, UseInst);
          })) {
        LLVM_DEBUG(dbgs() << " ... skipping, dominated by killing def\n");
        continue;
      }
      // A memory terminator kills all preceding MemoryDefs and all succeeding
      // MemoryAccesses. We do not have to check its users.
      if (isMemTerminator(MaybeDeadLoc, MaybeDeadI, UseInst)) {
        LLVM_DEBUG(
            dbgs()
            << " ... skipping, memterminator invalidates following accesses\n");
        continue;
      }
      if (isNoopIntrinsic(cast<MemoryUseOrDef>(UseAccess)->getMemoryInst())) {
        LLVM_DEBUG(dbgs() << "    ... adding uses of intrinsic\n");
        pushMemUses(UseAccess, WorkList, Visited);
        continue;
      }
      if (UseInst->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj)) {
        LLVM_DEBUG(dbgs() << "  ... found throwing instruction\n");
        return std::nullopt;
      }
      // Uses which may read the original MemoryDef mean we cannot eliminate the
      // original MD. Stop walk.
      // If KillingDef is a CallInst with "initializes" attribute, the reads in
      // the callee would be dominated by initializations, so it should be safe.
      bool IsKillingDefFromInitAttr = false;
      if (IsInitializesAttrMemLoc) {
        if (KillingI == UseInst &&
            KillingUndObj == getUnderlyingObject(MaybeDeadLoc.Ptr))
          IsKillingDefFromInitAttr = true;
      }

      if (isReadClobber(MaybeDeadLoc, UseInst) && !IsKillingDefFromInitAttr) {
        LLVM_DEBUG(dbgs() << "    ... found read clobber\n");
        return std::nullopt;
      }
      // If this worklist walks back to the original memory access (and the
      // pointer is not guaranteed loop invariant) then we cannot assume that a
      // store kills itself.
      if (MaybeDeadAccess == UseAccess &&
          !isGuaranteedLoopInvariant(MaybeDeadLoc.Ptr)) {
        LLVM_DEBUG(dbgs() << " ... found not loop invariant self access\n");
        return std::nullopt;
      }
      // Otherwise, for the KillingDef and MaybeDeadAccess we only have to check
      // if it reads the memory location.
      // TODO: It would probably be better to check for self-reads before
      // calling the function.
      if (KillingDef == UseAccess || MaybeDeadAccess == UseAccess) {
        LLVM_DEBUG(dbgs() << "    ... skipping killing def/dom access\n");
        continue;
      }
      // Check all uses for MemoryDefs, except for defs completely overwriting
      // the original location. Otherwise we have to check uses of *all*
      // MemoryDefs we discover, including non-aliasing ones. Otherwise we might
      // miss cases like the following
      //   1 = Def(LoE) ; <----- DeadDef stores [0,1]
      //   2 = Def(1)   ; (2, 1) = NoAlias,   stores [2,3]
      //   Use(2)       ; MayAlias 2 *and* 1, loads [0, 3].
      //                  (The Use points to the *first* Def it may alias)
      //   3 = Def(1)   ; <---- Current  (3, 2) = NoAlias, (3,1) = MayAlias,
      //                  stores [0,1]
      if (MemoryDef *UseDef = dyn_cast<MemoryDef>(UseAccess)) {
        if (isCompleteOverwrite(MaybeDeadLoc, MaybeDeadI, UseInst)) {
          BasicBlock *MaybeKillingBlock = UseInst->getParent();
          if (PostOrderNumbers.find(MaybeKillingBlock)->second <
              PostOrderNumbers.find(MaybeDeadAccess->getBlock())->second) {
            if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
              LLVM_DEBUG(dbgs()
                         << "    ... found killing def " << *UseInst << "\n");
              KillingDefs.insert(UseInst);
            }
          } else {
            LLVM_DEBUG(dbgs()
                       << "    ... found preceding def " << *UseInst << "\n");
            return std::nullopt;
          }
        } else
          pushMemUses(UseDef, WorkList, Visited);
      }
    }
    // For accesses to locations visible after the function returns, make sure
    // that the location is dead (=overwritten) along all paths from
    // MaybeDeadAccess to the exit.
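    //
    // For example, for a store to a global %g (illustrative CFG only):
    //        [ MaybeDeadAccess: store to %g ]
    //              /                    \
    //   [ overwrites %g ]     [ returns without writing %g ]
    // the killing block only covers the left path, so the original store is
    // still observable on the right path and must not be removed. The
    // post-dominator / CFG scan below performs this check.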
    if (!isInvisibleToCallerAfterRet(KillingUndObj)) {
      SmallPtrSet<BasicBlock *, 16> KillingBlocks;
      for (Instruction *KD : KillingDefs)
        KillingBlocks.insert(KD->getParent());
      assert(!KillingBlocks.empty() &&
             "Expected at least a single killing block");

      // Find the common post-dominator of all killing blocks.
      BasicBlock *CommonPred = *KillingBlocks.begin();
      for (BasicBlock *BB : llvm::drop_begin(KillingBlocks)) {
        if (!CommonPred)
          break;
        CommonPred = PDT.findNearestCommonDominator(CommonPred, BB);
      }

      // If the common post-dominator does not post-dominate MaybeDeadAccess,
      // there is a path from MaybeDeadAccess to an exit not going through a
      // killing block.
      if (!PDT.dominates(CommonPred, MaybeDeadAccess->getBlock())) {
        if (!AnyUnreachableExit)
          return std::nullopt;

        // Fall back to CFG scan starting at all non-unreachable roots if not
        // all paths to the exit go through CommonPred.
        CommonPred = nullptr;
      }

      // If CommonPred itself is in the set of killing blocks, we're done.
      if (KillingBlocks.count(CommonPred))
        return {MaybeDeadAccess};
      SetVector<BasicBlock *> WorkList;

      // If CommonPred is null, there are multiple exits from the function.
      // They all have to be added to the worklist.
      if (CommonPred)
        WorkList.insert(CommonPred);
      else
        for (BasicBlock *R : PDT.roots()) {
          if (!isa<UnreachableInst>(R->getTerminator()))
            WorkList.insert(R);
        }

      NumCFGTries++;
      // Check if all paths starting from an exit node go through one of the
      // killing blocks before reaching MaybeDeadAccess.
      for (unsigned I = 0; I < WorkList.size(); I++) {
        NumCFGChecks++;
        BasicBlock *Current = WorkList[I];
        if (KillingBlocks.count(Current))
          continue;
        if (Current == MaybeDeadAccess->getBlock())
          return std::nullopt;

        // MaybeDeadAccess is reachable from the entry, so we don't have to
        // explore unreachable blocks further.
        if (!DT.isReachableFromEntry(Current))
          continue;

        for (BasicBlock *Pred : predecessors(Current))
          WorkList.insert(Pred);

        if (WorkList.size() >= MemorySSAPathCheckLimit)
          return std::nullopt;
      }
      NumCFGSuccess++;
    }
    // No aliasing MemoryUses of MaybeDeadAccess found, MaybeDeadAccess is
    // potentially dead.
    return {MaybeDeadAccess};
  }
  /// Delete dead memory defs and recursively add their operands to ToRemove if
  /// they became dead.
  void
  deleteDeadInstruction(Instruction *SI,
                        SmallPtrSetImpl<MemoryAccess *> *Deleted = nullptr) {
    MemorySSAUpdater Updater(&MSSA);
    SmallVector<Instruction *, 32> NowDeadInsts;
    NowDeadInsts.push_back(SI);
    --NumFastOther;

    while (!NowDeadInsts.empty()) {
      Instruction *DeadInst = NowDeadInsts.pop_back_val();
      ++NumFastOther;
      // Try to preserve debug information attached to the dead instruction.
      salvageDebugInfo(*DeadInst);
      salvageKnowledge(DeadInst);
      // Remove the Instruction from MSSA.
      MemoryAccess *MA = MSSA.getMemoryAccess(DeadInst);
      bool IsMemDef = MA && isa<MemoryDef>(MA);
      if (MA) {
        if (IsMemDef) {
          auto *MD = cast<MemoryDef>(MA);
          SkipStores.insert(MD);
          if (Deleted)
            Deleted->insert(MD);
          if (auto *SI = dyn_cast<StoreInst>(MD->getMemoryInst())) {
            if (SI->getValueOperand()->getType()->isPointerTy()) {
              const Value *UO = getUnderlyingObject(SI->getValueOperand());
              if (CapturedBeforeReturn.erase(UO))
                ShouldIterateEndOfFunctionDSE = true;
              InvisibleToCallerAfterRet.erase(UO);
            }
          }
        }

        Updater.removeMemoryAccess(MA);
      }
      auto I = IOLs.find(DeadInst->getParent());
      if (I != IOLs.end())
        I->second.erase(DeadInst);
      // Remove its operands.
      for (Use &O : DeadInst->operands())
        if (Instruction *OpI = dyn_cast<Instruction>(O)) {
          O.set(PoisonValue::get(O->getType()));
          if (isInstructionTriviallyDead(OpI, &TLI))
            NowDeadInsts.push_back(OpI);
        }
      EA.removeInstruction(DeadInst);
      // Remove memory defs directly if they don't produce results, but only
      // queue other dead instructions for later removal. They may have been
      // used as memory locations that have been cached by BatchAA. Removing
      // them here may lead to newly created instructions being allocated at
      // the same address, yielding stale cache entries.
      if (IsMemDef && DeadInst->getType()->isVoidTy())
        DeadInst->eraseFromParent();
      else
        ToRemove.push_back(DeadInst);
    }
  }
  // Check for any extra throws between \p KillingI and \p DeadI that block
  // DSE. This only checks extra maythrows (those that aren't MemoryDefs).
  // MemoryDefs that may throw are handled during the walk from one def to the
  // next.
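  //
  // For example, a call that may throw between two stores to a global keeps
  // the earlier store live: if the exception propagates, the caller can still
  // observe the earlier value. (Illustrative; objects invisible to the caller
  // on unwind are exempt, see below.)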
  bool mayThrowBetween(Instruction *KillingI, Instruction *DeadI,
                       const Value *KillingUndObj) {
    // First see if we can ignore it by using the fact that KillingI is an
    // alloca/alloca like object that is not visible to the caller during
    // execution of the function.
    if (KillingUndObj && isInvisibleToCallerOnUnwind(KillingUndObj))
      return false;

    if (KillingI->getParent() == DeadI->getParent())
      return ThrowingBlocks.count(KillingI->getParent());
    return !ThrowingBlocks.empty();
  }
  // Check if \p DeadI acts as a DSE barrier for \p KillingI. The following
  // instructions act as barriers:
  //  * A memory instruction that may throw, when \p KillingI accesses a
  //    non-stack object.
  //  * Atomic stores stronger than monotonic.
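  //
  // For example, a monotonic atomic store can still be analyzed here, while
  // e.g. a release store (or a cmpxchg with a stronger ordering) is treated as
  // a barrier, since eliminating or reordering stores across it could break
  // the ordering guarantees other threads rely on. (Illustrative only.)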
  bool isDSEBarrier(const Value *KillingUndObj, Instruction *DeadI) {
    // If DeadI may throw it acts as a barrier, unless the access is to an
    // alloca or alloca-like object that does not escape.
    if (DeadI->mayThrow() && !isInvisibleToCallerOnUnwind(KillingUndObj))
      return true;

    // If DeadI is an atomic load/store stronger than monotonic, do not try to
    // eliminate/reorder it.
    if (DeadI->isAtomic()) {
      if (auto *LI = dyn_cast<LoadInst>(DeadI))
        return isStrongerThanMonotonic(LI->getOrdering());
      if (auto *SI = dyn_cast<StoreInst>(DeadI))
        return isStrongerThanMonotonic(SI->getOrdering());
      if (auto *ARMW = dyn_cast<AtomicRMWInst>(DeadI))
        return isStrongerThanMonotonic(ARMW->getOrdering());
      if (auto *CmpXchg = dyn_cast<AtomicCmpXchgInst>(DeadI))
        return isStrongerThanMonotonic(CmpXchg->getSuccessOrdering()) ||
               isStrongerThanMonotonic(CmpXchg->getFailureOrdering());
      llvm_unreachable("other instructions should be skipped in MemorySSA");
    }
    return false;
  }
  /// Eliminate writes to objects that are not visible in the caller and are not
  /// accessed before returning from the function.
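  ///
  /// For example (illustrative IR):
  ///   define void @f() {
  ///     %a = alloca i32
  ///     store i32 1, ptr %a   ; never read before the function returns
  ///     ret void
  ///   }
  /// The store to the non-escaping alloca is dead and can be removed.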
  bool eliminateDeadWritesAtEndOfFunction() {
    bool MadeChange = false;
    LLVM_DEBUG(
        dbgs()
        << "Trying to eliminate MemoryDefs at the end of the function\n");
    do {
      ShouldIterateEndOfFunctionDSE = false;
      for (MemoryDef *Def : llvm::reverse(MemDefs)) {
        if (SkipStores.contains(Def))
          continue;

        Instruction *DefI = Def->getMemoryInst();
        auto DefLoc = getLocForWrite(DefI);
        if (!DefLoc || !isRemovable(DefI)) {
          LLVM_DEBUG(dbgs() << "  ... could not get location for write or "
                               "instruction not removable.\n");
          continue;
        }

        // NOTE: Currently eliminating writes at the end of a function is
        // limited to MemoryDefs with a single underlying object, to save
        // compile-time. In practice it appears the case with multiple
        // underlying objects is very uncommon. If it turns out to be important,
        // we can use getUnderlyingObjects here instead.
        const Value *UO = getUnderlyingObject(DefLoc->Ptr);
        if (!isInvisibleToCallerAfterRet(UO))
          continue;

        if (isWriteAtEndOfFunction(Def, *DefLoc)) {
          // See through pointer-to-pointer bitcasts.
          LLVM_DEBUG(dbgs() << "   ... MemoryDef is not accessed until the end "
                               "of the function\n");
          deleteDeadInstruction(DefI);
          ++NumFastStores;
          MadeChange = true;
        }
      }
    } while (ShouldIterateEndOfFunctionDSE);
    return MadeChange;
  }
  /// If we have a zero initializing memset following a call to malloc,
  /// try folding it into a call to calloc.
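  ///
  /// For example (illustrative C):
  ///   p = malloc(n);  memset(p, 0, n);   ==>   p = calloc(1, n);
  /// provided the memset covers the whole allocation and nothing may write
  /// the memory in between.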
  bool tryFoldIntoCalloc(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    if (!MemSet)
      // TODO: Could handle zero store to small allocation as well.
      return false;
    Constant *StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    if (!StoredConstant || !StoredConstant->isNullValue())
      return false;

    if (!isRemovable(DefI))
      // The memset might be volatile.
      return false;

    if (F.hasFnAttribute(Attribute::SanitizeMemory) ||
        F.hasFnAttribute(Attribute::SanitizeAddress) ||
        F.hasFnAttribute(Attribute::SanitizeHWAddress) ||
        F.getName() == "calloc")
      return false;
    auto *Malloc = const_cast<CallInst *>(dyn_cast<CallInst>(DefUO));
    if (!Malloc)
      return false;
    auto *InnerCallee = Malloc->getCalledFunction();
    if (!InnerCallee)
      return false;
    LibFunc Func;
    if (!TLI.getLibFunc(*InnerCallee, Func) || !TLI.has(Func) ||
        Func != LibFunc_malloc)
      return false;
    // Gracefully handle malloc with unexpected memory attributes.
    auto *MallocDef = dyn_cast_or_null<MemoryDef>(MSSA.getMemoryAccess(Malloc));
    if (!MallocDef)
      return false;

    auto shouldCreateCalloc = [](CallInst *Malloc, CallInst *Memset) {
      // Check for br(icmp ptr, null), truebb, falsebb) pattern at the end
      // of malloc block.
      auto *MallocBB = Malloc->getParent(),
           *MemsetBB = Memset->getParent();
      if (MallocBB == MemsetBB)
        return true;
      auto *Ptr = Memset->getArgOperand(0);
      auto *TI = MallocBB->getTerminator();
      BasicBlock *TrueBB, *FalseBB;
      if (!match(TI, m_Br(m_SpecificICmp(ICmpInst::ICMP_EQ, m_Specific(Ptr),
                                         m_Zero()),
                          TrueBB, FalseBB)))
        return false;
      if (MemsetBB != FalseBB)
        return false;
      return true;
    };

    if (Malloc->getOperand(0) != MemSet->getLength())
      return false;
    if (!shouldCreateCalloc(Malloc, MemSet) ||
        !DT.dominates(Malloc, MemSet) ||
        !memoryIsNotModifiedBetween(Malloc, MemSet, BatchAA, DL, &DT))
      return false;
    IRBuilder<> IRB(Malloc);
    Type *SizeTTy = Malloc->getArgOperand(0)->getType();
    auto *Calloc = emitCalloc(Malloc->getType(), ConstantInt::get(SizeTTy, 1),
                              Malloc->getArgOperand(0), IRB, TLI);
    if (!Calloc)
      return false;

    MemorySSAUpdater Updater(&MSSA);
    auto *NewAccess =
        Updater.createMemoryAccessAfter(cast<Instruction>(Calloc), nullptr,
                                        MallocDef);
    auto *NewAccessMD = cast<MemoryDef>(NewAccess);
    Updater.insertDef(NewAccessMD, /*RenameUses=*/true);
    Malloc->replaceAllUsesWith(Calloc);
    deleteDeadInstruction(Malloc);
    return true;
  }
  // Check if there is a dominating condition that implies that the value
  // being stored to a pointer is already present at that location.
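  //
  // For example (illustrative C):
  //   if (*p == v) {
  //     ...
  //     *p = v;   // no-op: the dominating branch guarantees *p already == v
  //   }
  // provided nothing may modify *p between the comparison and the store,
  // which is what the MemorySSA check below verifies.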
  bool dominatingConditionImpliesValue(MemoryDef *Def) {
    auto *StoreI = cast<StoreInst>(Def->getMemoryInst());
    BasicBlock *StoreBB = StoreI->getParent();
    Value *StorePtr = StoreI->getPointerOperand();
    Value *StoreVal = StoreI->getValueOperand();

    DomTreeNode *IDom = DT.getNode(StoreBB)->getIDom();
    if (!IDom)
      return false;

    auto *BI = dyn_cast<BranchInst>(IDom->getBlock()->getTerminator());
    if (!BI || !BI->isConditional())
      return false;

    // In case both blocks are the same, it is not possible to determine
    // if optimization is possible. (We would not want to optimize a store
    // in the FalseBB if condition is true and vice versa.)
    if (BI->getSuccessor(0) == BI->getSuccessor(1))
      return false;

    Instruction *ICmpL;
    ICmpInst::Predicate Pred;
    if (!match(BI->getCondition(),
               m_c_ICmp(Pred,
                        m_CombineAnd(m_Load(m_Specific(StorePtr)),
                                     m_Instruction(ICmpL)),
                        m_Specific(StoreVal))) ||
        !ICmpInst::isEquality(Pred))
      return false;

    // In case the else block also branches to the if block or the other way
    // around, it is not possible to determine if the optimization is possible.
    if (Pred == ICmpInst::ICMP_EQ &&
        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(0)),
                      StoreBB))
      return false;

    if (Pred == ICmpInst::ICMP_NE &&
        !DT.dominates(BasicBlockEdge(BI->getParent(), BI->getSuccessor(1)),
                      StoreBB))
      return false;

    MemoryAccess *LoadAcc = MSSA.getMemoryAccess(ICmpL);
    MemoryAccess *ClobAcc =
        MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA);

    return MSSA.dominates(ClobAcc, LoadAcc);
  }
  /// \returns true if \p Def is a no-op store, either because it
  /// directly stores back a loaded value or stores zero to a calloced object.
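  ///
  /// For example (illustrative IR):
  ///   %v = load i32, ptr %p
  ///   store i32 %v, ptr %p     ; stores back the value just loaded: no-op
  /// or a store/memset of zero to memory freshly returned by calloc.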
  bool storeIsNoop(MemoryDef *Def, const Value *DefUO) {
    Instruction *DefI = Def->getMemoryInst();
    StoreInst *Store = dyn_cast<StoreInst>(DefI);
    MemSetInst *MemSet = dyn_cast<MemSetInst>(DefI);
    Constant *StoredConstant = nullptr;
    if (Store)
      StoredConstant = dyn_cast<Constant>(Store->getOperand(0));
    else if (MemSet)
      StoredConstant = dyn_cast<Constant>(MemSet->getValue());
    else
      return false;

    if (!isRemovable(DefI))
      return false;

    if (StoredConstant) {
      Constant *InitC =
          getInitialValueOfAllocation(DefUO, &TLI, StoredConstant->getType());
      // If the clobbering access is LiveOnEntry, no instructions between them
      // can modify the memory location.
      if (InitC && InitC == StoredConstant)
        return MSSA.isLiveOnEntryDef(
            MSSA.getSkipSelfWalker()->getClobberingMemoryAccess(Def, BatchAA));
    }

    if (!Store)
      return false;

    if (dominatingConditionImpliesValue(Def))
      return true;

    if (auto *LoadI = dyn_cast<LoadInst>(Store->getOperand(0))) {
      if (LoadI->getPointerOperand() == Store->getOperand(1)) {
        // Get the defining access for the load.
        auto *LoadAccess = MSSA.getMemoryAccess(LoadI)->getDefiningAccess();
        // Fast path: the defining accesses are the same.
        if (LoadAccess == Def->getDefiningAccess())
          return true;

        // Look through phi accesses. Recursively scan all phi accesses by
        // adding them to a worklist. Bail when we run into a memory def that
        // does not match LoadAccess.
        SetVector<MemoryAccess *> ToCheck;
        MemoryAccess *Current =
            MSSA.getWalker()->getClobberingMemoryAccess(Def, BatchAA);
        // We don't want to bail when we run into the store memory def. But,
        // the phi access may point to it. So, pretend like we've already
        // checked it.
        ToCheck.insert(Def);
        ToCheck.insert(Current);
        // Start at current (1) to simulate already having checked Def.
        for (unsigned I = 1; I < ToCheck.size(); ++I) {
          Current = ToCheck[I];
          if (auto PhiAccess = dyn_cast<MemoryPhi>(Current)) {
            // Check all the operands.
            for (auto &Use : PhiAccess->incoming_values())
              ToCheck.insert(cast<MemoryAccess>(&Use));
            continue;
          }

          // If we found a memory def, bail. This happens when we have an
          // unrelated write in between an otherwise noop store.
          assert(isa<MemoryDef>(Current) &&
                 "Only MemoryDefs should reach here.");
          // TODO: Skip no alias MemoryDefs that have no aliasing reads.
          // We are searching for the definition of the store's destination.
          // So, if that is the same definition as the load, then this is a
          // noop. Otherwise, fail.
          if (LoadAccess != Current)
            return false;
        }
        return true;
      }
    }
    return false;
  }
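  /// Try to shorten stores in \p IOL that are only partially dead because a
  /// later store overwrote part of them. For example (illustrative), if a
  /// memset writes bytes [0, 16) of an object and a later store overwrites
  /// bytes [8, 16), the memset can be shortened to cover only [0, 8).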
  bool removePartiallyOverlappedStores(InstOverlapIntervalsTy &IOL) {
    bool Changed = false;
    for (auto OI : IOL) {
      Instruction *DeadI = OI.first;
      MemoryLocation Loc = *getLocForWrite(DeadI);
      assert(isRemovable(DeadI) && "Expect only removable instruction");

      const Value *Ptr = Loc.Ptr->stripPointerCasts();
      int64_t DeadStart = 0;
      uint64_t DeadSize = Loc.Size.getValue();
      GetPointerBaseWithConstantOffset(Ptr, DeadStart, DL);
      OverlapIntervalsTy &IntervalMap = OI.second;
      Changed |= tryToShortenEnd(DeadI, IntervalMap, DeadStart, DeadSize);
      if (IntervalMap.empty())
        continue;
      Changed |= tryToShortenBegin(DeadI, IntervalMap, DeadStart, DeadSize);
    }
    return Changed;
  }
  /// Eliminates writes to locations where the value that is being written
  /// is already stored at the same location.
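  ///
  /// For example (illustrative IR):
  ///   call void @llvm.memset.p0.i64(ptr %p, i8 0, i64 16, i1 false)
  ///   store i8 0, ptr %p       ; the byte is already zero, store is redundant
  /// or two identical stores back to back with no clobber in between.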
  bool eliminateRedundantStoresOfExistingValues() {
    bool MadeChange = false;
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs that write the "
                         "already existing value\n");
    for (auto *Def : MemDefs) {
      if (SkipStores.contains(Def) || MSSA.isLiveOnEntryDef(Def))
        continue;

      Instruction *DefInst = Def->getMemoryInst();
      auto MaybeDefLoc = getLocForWrite(DefInst);
      if (!MaybeDefLoc || !isRemovable(DefInst))
        continue;

      MemoryDef *UpperDef;
      // To conserve compile-time, we avoid walking to the next clobbering def.
      // Instead, we just try to get the optimized access, if it exists. DSE
      // will try to optimize defs during the earlier traversal.
      if (Def->isOptimized())
        UpperDef = dyn_cast<MemoryDef>(Def->getOptimized());
      else
        UpperDef = dyn_cast<MemoryDef>(Def->getDefiningAccess());
      if (!UpperDef || MSSA.isLiveOnEntryDef(UpperDef))
        continue;

      Instruction *UpperInst = UpperDef->getMemoryInst();
      auto IsRedundantStore = [&]() {
        if (DefInst->isIdenticalTo(UpperInst))
          return true;
        if (auto *MemSetI = dyn_cast<MemSetInst>(UpperInst)) {
          if (auto *SI = dyn_cast<StoreInst>(DefInst)) {
            // MemSetInst must have a write location.
            auto UpperLoc = getLocForWrite(UpperInst);
            if (!UpperLoc)
              return false;
            int64_t InstWriteOffset = 0;
            int64_t DepWriteOffset = 0;
            auto OR = isOverwrite(UpperInst, DefInst, *UpperLoc, *MaybeDefLoc,
                                  InstWriteOffset, DepWriteOffset);
            Value *StoredByte = isBytewiseValue(SI->getValueOperand(), DL);
            return StoredByte && StoredByte == MemSetI->getOperand(1) &&
                   OR == OW_Complete;
          }
        }
        return false;
      };

      if (!IsRedundantStore() || isReadClobber(*MaybeDefLoc, DefInst))
        continue;
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: " << *DefInst
                        << '\n');
      deleteDeadInstruction(DefInst);
      NumRedundantStores++;
      MadeChange = true;
    }
    return MadeChange;
  }
  // Return the locations written by the initializes attribute.
  // Note that this function considers:
  // 1. Unwind edge: use "initializes" attribute only if the callee has
  //    "nounwind" attribute, or the argument has "dead_on_unwind" attribute,
  //    or the argument is invisible to caller on unwind. That is, we don't
  //    perform incorrect DSE on unwind edges in the current function.
  // 2. Argument alias: for aliasing arguments, the "initializes" attribute is
  //    the intersected range list of their "initializes" attributes.
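  //
  // For example (illustrative IR, assuming the parameter attribute syntax
  // shown here):
  //   call void @init(ptr initializes((0, 8)) %p)  ; writes bytes [0, 8) of %p
  // lets the call act as a killing def for earlier stores to those bytes,
  // subject to the unwind and aliasing rules above.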
  SmallVector<MemoryLocation, 1> getInitializesArgMemLoc(const Instruction *I);

  // Try to eliminate dead defs that access `KillingLocWrapper.MemLoc` and are
  // killed by `KillingLocWrapper.MemDef`. Return whether any changes were
  // made, and whether `KillingLocWrapper.DefInst` was deleted.
  std::pair<bool, bool>
  eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper);

  // Try to eliminate dead defs killed by `KillingDefWrapper` and return
  // whether any change was made.
  bool eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper);
};
SmallVector<MemoryLocation, 1>
DSEState::getInitializesArgMemLoc(const Instruction *I) {
  const CallBase *CB = dyn_cast<CallBase>(I);
  if (!CB)
    return {};

  // Collect aliasing arguments and their initializes ranges.
  SmallMapVector<Value *, SmallVector<ArgumentInitInfo, 2>, 2> Arguments;
  for (unsigned Idx = 0, Count = CB->arg_size(); Idx < Count; ++Idx) {
    ConstantRangeList Inits;
    Attribute InitializesAttr = CB->getParamAttr(Idx, Attribute::Initializes);
    if (InitializesAttr.isValid())
      Inits = InitializesAttr.getValueAsConstantRangeList();

    Value *CurArg = CB->getArgOperand(Idx);
    // We don't perform incorrect DSE on unwind edges in the current function,
    // and use the "initializes" attribute to kill dead stores if:
    // - The call does not throw exceptions, "CB->doesNotThrow()".
    // - Or the callee parameter has "dead_on_unwind" attribute.
    // - Or the argument is invisible to caller on unwind, and there are no
    //   unwind edges from this call in the current function (e.g. `CallInst`).
    bool IsDeadOrInvisibleOnUnwind =
        CB->paramHasAttr(Idx, Attribute::DeadOnUnwind) ||
        (isa<CallInst>(CB) && isInvisibleToCallerOnUnwind(CurArg));
    ArgumentInitInfo InitInfo{Idx, IsDeadOrInvisibleOnUnwind, Inits};
    bool FoundAliasing = false;
    for (auto &[Arg, AliasList] : Arguments) {
      auto AAR = BatchAA.alias(MemoryLocation::getBeforeOrAfter(Arg),
                               MemoryLocation::getBeforeOrAfter(CurArg));
      if (AAR == AliasResult::NoAlias) {
        continue;
      } else if (AAR == AliasResult::MustAlias) {
        FoundAliasing = true;
        AliasList.push_back(InitInfo);
      } else {
        // For PartialAlias and MayAlias, there is an offset or may be an
        // unknown offset between the arguments and we insert an empty init
        // range to discard the entire initializes info while intersecting.
        FoundAliasing = true;
        AliasList.push_back(ArgumentInitInfo{Idx, IsDeadOrInvisibleOnUnwind,
                                             ConstantRangeList()});
      }
    }
    if (!FoundAliasing)
      Arguments[CurArg] = {InitInfo};
  }

  SmallVector<MemoryLocation, 1> Locations;
  for (const auto &[_, Args] : Arguments) {
    auto IntersectedRanges =
        getIntersectedInitRangeList(Args, CB->doesNotThrow());
    if (IntersectedRanges.empty())
      continue;

    for (const auto &Arg : Args) {
      for (const auto &Range : IntersectedRanges) {
        int64_t Start = Range.getLower().getSExtValue();
        int64_t End = Range.getUpper().getSExtValue();
        // For now, we only handle locations starting at offset 0.
        if (Start == 0)
          Locations.push_back(MemoryLocation(CB->getArgOperand(Arg.Idx),
                                             LocationSize::precise(End - Start),
                                             CB->getAAMetadata()));
      }
    }
  }
  return Locations;
}
std::pair<bool, bool>
DSEState::eliminateDeadDefs(const MemoryLocationWrapper &KillingLocWrapper) {
  bool Changed = false;
  bool DeletedKillingLoc = false;
  unsigned ScanLimit = MemorySSAScanLimit;
  unsigned WalkerStepLimit = MemorySSAUpwardsStepLimit;
  unsigned PartialLimit = MemorySSAPartialStoreLimit;
  // Worklist of MemoryAccesses that may be killed by
  // "KillingLocWrapper.MemDef".
  SmallSetVector<MemoryAccess *, 8> ToCheck;
  // Track MemoryAccesses that have been deleted in the loop below, so we can
  // skip them. Don't use SkipStores for this, which may contain reused
  // MemoryAccess addresses.
  SmallPtrSet<MemoryAccess *, 8> Deleted;
  [[maybe_unused]] unsigned OrigNumSkipStores = SkipStores.size();
  ToCheck.insert(KillingLocWrapper.MemDef->getDefiningAccess());
  // Check if MemoryAccesses in the worklist are killed by
  // "KillingLocWrapper.MemDef".
  for (unsigned I = 0; I < ToCheck.size(); I++) {
    MemoryAccess *Current = ToCheck[I];
    if (Deleted.contains(Current))
      continue;
    std::optional<MemoryAccess *> MaybeDeadAccess = getDomMemoryDef(
        KillingLocWrapper.MemDef, Current, KillingLocWrapper.MemLoc,
        KillingLocWrapper.UnderlyingObject, ScanLimit, WalkerStepLimit,
        isMemTerminatorInst(KillingLocWrapper.DefInst), PartialLimit,
        KillingLocWrapper.DefByInitializesAttr);

    if (!MaybeDeadAccess) {
      LLVM_DEBUG(dbgs() << "  finished walk\n");
      continue;
    }
    MemoryAccess *DeadAccess = *MaybeDeadAccess;
    LLVM_DEBUG(dbgs() << " Checking if we can kill " << *DeadAccess);
    if (isa<MemoryPhi>(DeadAccess)) {
      LLVM_DEBUG(dbgs() << "\n  ... adding incoming values to worklist\n");
      for (Value *V : cast<MemoryPhi>(DeadAccess)->incoming_values()) {
        MemoryAccess *IncomingAccess = cast<MemoryAccess>(V);
        BasicBlock *IncomingBlock = IncomingAccess->getBlock();
        BasicBlock *PhiBlock = DeadAccess->getBlock();

        // We only consider incoming MemoryAccesses that come before the
        // MemoryPhi. Otherwise we could discover candidates that do not
        // strictly dominate our starting def.
        if (PostOrderNumbers[IncomingBlock] > PostOrderNumbers[PhiBlock])
          ToCheck.insert(IncomingAccess);
      }
      continue;
    }
    // We cannot apply the initializes attribute to DeadAccess/DeadDef.
    // It would incorrectly consider a call instruction as redundant store
    // and remove this call instruction.
    // TODO: this conflates the existence of a MemoryLocation with being able
    // to delete the instruction. Fix isRemovable() to consider calls with
    // side effects that cannot be removed, e.g. calls with the initializes
    // attribute, and remove getLocForInst(ConsiderInitializesAttr = false).
    MemoryDefWrapper DeadDefWrapper(
        cast<MemoryDef>(DeadAccess),
        getLocForInst(cast<MemoryDef>(DeadAccess)->getMemoryInst(),
                      /*ConsiderInitializesAttr=*/false));
    assert(DeadDefWrapper.DefinedLocations.size() == 1);
    MemoryLocationWrapper &DeadLocWrapper =
        DeadDefWrapper.DefinedLocations.front();
    LLVM_DEBUG(dbgs() << " (" << *DeadLocWrapper.DefInst << ")\n");
    ToCheck.insert(DeadLocWrapper.MemDef->getDefiningAccess());
    NumGetDomMemoryDefPassed++;
    if (!DebugCounter::shouldExecute(MemorySSACounter))
      continue;
    if (isMemTerminatorInst(KillingLocWrapper.DefInst)) {
      if (KillingLocWrapper.UnderlyingObject != DeadLocWrapper.UnderlyingObject)
        continue;
      LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                        << *DeadLocWrapper.DefInst << "\n  KILLER: "
                        << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(DeadLocWrapper.DefInst, &Deleted);
      ++NumFastStores;
      Changed = true;
    } else {
      // Check if DeadI overwrites KillingI.
      int64_t KillingOffset = 0;
      int64_t DeadOffset = 0;
      OverwriteResult OR =
          isOverwrite(KillingLocWrapper.DefInst, DeadLocWrapper.DefInst,
                      KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
                      KillingOffset, DeadOffset);
      if (OR == OW_MaybePartial) {
        auto &IOL = IOLs[DeadLocWrapper.DefInst->getParent()];
        OR = isPartialOverwrite(KillingLocWrapper.MemLoc, DeadLocWrapper.MemLoc,
                                KillingOffset, DeadOffset,
                                DeadLocWrapper.DefInst, IOL);
      }
      if (EnablePartialStoreMerging && OR == OW_PartialEarlierWithFullLater) {
        auto *DeadSI = dyn_cast<StoreInst>(DeadLocWrapper.DefInst);
        auto *KillingSI = dyn_cast<StoreInst>(KillingLocWrapper.DefInst);
        // We are re-using tryToMergePartialOverlappingStores, which requires
        // DeadSI to dominate KillingSI.
        // TODO: implement tryToMergePartialOverlappingStores using MemorySSA.
        if (DeadSI && KillingSI && DT.dominates(DeadSI, KillingSI)) {
          if (Constant *Merged = tryToMergePartialOverlappingStores(
                  KillingSI, DeadSI, KillingOffset, DeadOffset, DL, BatchAA,
                  &DT)) {

            // Update stored value of earlier store to merged constant.
            DeadSI->setOperand(0, Merged);
            ++NumModifiedStores;
            Changed = true;

            DeletedKillingLoc = true;

            // Remove killing store and remove any outstanding overlap
            // intervals for the updated store.
            deleteDeadInstruction(KillingSI, &Deleted);
            auto I = IOLs.find(DeadSI->getParent());
            if (I != IOLs.end())
              I->second.erase(DeadSI);
          }
        }
      }
      if (OR == OW_Complete) {
        LLVM_DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
                          << *DeadLocWrapper.DefInst << "\n  KILLER: "
                          << *KillingLocWrapper.DefInst << '\n');
        deleteDeadInstruction(DeadLocWrapper.DefInst, &Deleted);
        ++NumFastStores;
        Changed = true;
      }
    }
  }

  assert(SkipStores.size() - OrigNumSkipStores == Deleted.size() &&
         "SkipStores and Deleted out of sync?");

  return {Changed, DeletedKillingLoc};
}
bool DSEState::eliminateDeadDefs(const MemoryDefWrapper &KillingDefWrapper) {
  if (KillingDefWrapper.DefinedLocations.empty()) {
    LLVM_DEBUG(dbgs() << "Failed to find analyzable write location for "
                      << *KillingDefWrapper.DefInst << "\n");
    return false;
  }

  bool MadeChange = false;
  for (auto &KillingLocWrapper : KillingDefWrapper.DefinedLocations) {
    LLVM_DEBUG(dbgs() << "Trying to eliminate MemoryDefs killed by "
                      << *KillingLocWrapper.MemDef << " ("
                      << *KillingLocWrapper.DefInst << ")\n");
    auto [Changed, DeletedKillingLoc] = eliminateDeadDefs(KillingLocWrapper);
    MadeChange |= Changed;

    // Check if the store is a no-op.
    if (!DeletedKillingLoc && storeIsNoop(KillingLocWrapper.MemDef,
                                          KillingLocWrapper.UnderlyingObject)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove No-Op Store:\n  DEAD: "
                        << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(KillingLocWrapper.DefInst);
      NumRedundantStores++;
      MadeChange = true;
      continue;
    }
    // Can we form a calloc from a memset/malloc pair?
    if (!DeletedKillingLoc &&
        tryFoldIntoCalloc(KillingLocWrapper.MemDef,
                          KillingLocWrapper.UnderlyingObject)) {
      LLVM_DEBUG(dbgs() << "DSE: Remove memset after forming calloc:\n"
                        << "  DEAD: " << *KillingLocWrapper.DefInst << '\n');
      deleteDeadInstruction(KillingLocWrapper.DefInst);
      MadeChange = true;
      continue;
    }
  }
  return MadeChange;
}
static bool eliminateDeadStores(Function &F, AliasAnalysis &AA, MemorySSA &MSSA,
                                DominatorTree &DT, PostDominatorTree &PDT,
                                const TargetLibraryInfo &TLI,
                                const LoopInfo &LI) {
  bool MadeChange = false;
  DSEState State(F, AA, MSSA, DT, PDT, TLI, LI);

  for (unsigned I = 0; I < State.MemDefs.size(); I++) {
    MemoryDef *KillingDef = State.MemDefs[I];
    if (State.SkipStores.count(KillingDef))
      continue;

    MemoryDefWrapper KillingDefWrapper(
        KillingDef, State.getLocForInst(KillingDef->getMemoryInst(),
                                        EnableInitializesImprovement));
    MadeChange |= State.eliminateDeadDefs(KillingDefWrapper);
  }

  if (EnablePartialOverwriteTracking)
    for (auto &KV : State.IOLs)
      MadeChange |= State.removePartiallyOverlappedStores(KV.second);

  MadeChange |= State.eliminateRedundantStoresOfExistingValues();
  MadeChange |= State.eliminateDeadWritesAtEndOfFunction();

  while (!State.ToRemove.empty()) {
    Instruction *DeadInst = State.ToRemove.pop_back_val();
    DeadInst->eraseFromParent();
  }

  return MadeChange;
}
} // end anonymous namespace

//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
PreservedAnalyses DSEPass::run(Function &F, FunctionAnalysisManager &AM) {
  AliasAnalysis &AA = AM.getResult<AAManager>(F);
  const TargetLibraryInfo &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  MemorySSA &MSSA = AM.getResult<MemorySSAAnalysis>(F).getMSSA();
  PostDominatorTree &PDT = AM.getResult<PostDominatorTreeAnalysis>(F);
  LoopInfo &LI = AM.getResult<LoopAnalysis>(F);

  bool Changed = eliminateDeadStores(F, AA, MSSA, DT, PDT, TLI, LI);

#ifdef LLVM_ENABLE_STATS
  if (AreStatisticsEnabled())
    for (auto &I : instructions(F))
      NumRemainingStores += isa<StoreInst>(&I);
#endif

  if (!Changed)
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<MemorySSAAnalysis>();
  PA.preserve<LoopAnalysis>();
  return PA;
}