//===- DeadStoreElimination.cpp - Fast Dead Store Elimination ------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements a trivial dead store elimination that only considers
// basic-block local redundant stores.
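//
// For example (an illustrative sketch of the basic case, not an exhaustive
// description of what the pass handles), in:
//
//   store i32 1, i32* %p   ; dead: overwritten below with no intervening read
//   store i32 2, i32* %p
//
// the first store is removed.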
//
// FIXME: This should eventually be extended to be a post-dominator tree
// traversal.  Doing so would be pretty trivial.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "dse"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Constants.h"
#include "llvm/Function.h"
#include "llvm/GlobalVariable.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Pass.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;

STATISTIC(NumFastStores, "Number of stores deleted");
STATISTIC(NumFastOther,  "Number of other instrs removed");
namespace {
  struct DSE : public FunctionPass {
    AliasAnalysis *AA;
    MemoryDependenceAnalysis *MD;

    static char ID; // Pass identification, replacement for typeid
    DSE() : FunctionPass(ID), AA(0), MD(0) {
      initializeDSEPass(*PassRegistry::getPassRegistry());
    }

    virtual bool runOnFunction(Function &F) {
      AA = &getAnalysis<AliasAnalysis>();
      MD = &getAnalysis<MemoryDependenceAnalysis>();
      DominatorTree &DT = getAnalysis<DominatorTree>();

      bool Changed = false;
      for (Function::iterator I = F.begin(), E = F.end(); I != E; ++I)
        // Only check non-dead blocks.  Dead blocks may have strange pointer
        // cycles that will confuse alias analysis.
        if (DT.isReachableFromEntry(I))
          Changed |= runOnBasicBlock(*I);

      AA = 0; MD = 0;
      return Changed;
    }

    bool runOnBasicBlock(BasicBlock &BB);
    bool HandleFree(CallInst *F);
    bool handleEndBlock(BasicBlock &BB);
    void RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                               SmallPtrSet<Value*, 16> &DeadStackObjects);

    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<DominatorTree>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
  };
} // end anonymous namespace
char DSE::ID = 0;
INITIALIZE_PASS_BEGIN(DSE, "dse", "Dead Store Elimination", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(DSE, "dse", "Dead Store Elimination", false, false)

FunctionPass *llvm::createDeadStoreEliminationPass() { return new DSE(); }
//===----------------------------------------------------------------------===//
// Helper functions
//===----------------------------------------------------------------------===//
/// DeleteDeadInstruction - Delete this instruction.  Before we do, go through
/// and zero out all the operands of this instruction.  If any of them become
/// dead, delete them and the computation tree that feeds them.
///
/// If ValueSet is non-null, remove any deleted instructions from it as well.
///
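/// For example (illustrative), deleting the store below also deletes the GEP
/// feeding it once that GEP has no remaining uses:
///   %p = getelementptr i32* %buf, i32 1   ; becomes trivially dead
///   store i32 0, i32* %p                  ; the instruction passed in
///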
static void DeleteDeadInstruction(Instruction *I,
                                  MemoryDependenceAnalysis &MD,
                                  SmallPtrSet<Value*, 16> *ValueSet = 0) {
  SmallVector<Instruction*, 32> NowDeadInsts;

  NowDeadInsts.push_back(I);
  --NumFastOther;

  // Before we touch this instruction, remove it from memdep!
  do {
    Instruction *DeadInst = NowDeadInsts.pop_back_val();
    ++NumFastOther;

    // This instruction is dead, zap it, in stages.  Start by removing it from
    // MemDep, which needs to know the operands and needs it to be in the
    // function.
    MD.removeInstruction(DeadInst);

    for (unsigned op = 0, e = DeadInst->getNumOperands(); op != e; ++op) {
      Value *Op = DeadInst->getOperand(op);
      DeadInst->setOperand(op, 0);

      // If this operand just became dead, add it to the NowDeadInsts list.
      if (!Op->use_empty()) continue;

      if (Instruction *OpI = dyn_cast<Instruction>(Op))
        if (isInstructionTriviallyDead(OpI))
          NowDeadInsts.push_back(OpI);
    }

    DeadInst->eraseFromParent();

    if (ValueSet) ValueSet->erase(DeadInst);
  } while (!NowDeadInsts.empty());
}
/// hasMemoryWrite - Does this instruction write some memory?  This only returns
/// true for things that we can analyze with other helpers below.
static bool hasMemoryWrite(Instruction *I) {
  if (isa<StoreInst>(I))
    return true;
  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I)) {
    switch (II->getIntrinsicID()) {
    default:
      return false;
    case Intrinsic::memset:
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::init_trampoline:
    case Intrinsic::lifetime_end:
      return true;
    }
  }
  return false;
}
/// getLocForWrite - Return a Location stored to by the specified instruction.
static AliasAnalysis::Location
getLocForWrite(Instruction *Inst, AliasAnalysis &AA) {
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
    return AA.getLocation(SI);

  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(Inst)) {
    // memcpy/memmove/memset.
    AliasAnalysis::Location Loc = AA.getLocationForDest(MI);
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // memset/memcpy, which write more than an i8.
    if (Loc.Size == AliasAnalysis::UnknownSize && AA.getTargetData() == 0)
      return AliasAnalysis::Location();
    return Loc;
  }

  IntrinsicInst *II = dyn_cast<IntrinsicInst>(Inst);
  if (II == 0) return AliasAnalysis::Location();

  switch (II->getIntrinsicID()) {
  default: return AliasAnalysis::Location(); // Unhandled intrinsic.
  case Intrinsic::init_trampoline:
    // If we don't have target data around, an unknown size in Location means
    // that we should use the size of the pointee type.  This isn't valid for
    // init.trampoline, which writes more than an i8.
    if (AA.getTargetData() == 0) return AliasAnalysis::Location();

    // FIXME: We don't know the size of the trampoline, so we can't really
    // handle it here.
    return AliasAnalysis::Location(II->getArgOperand(0));
  case Intrinsic::lifetime_end: {
    uint64_t Len = cast<ConstantInt>(II->getArgOperand(0))->getZExtValue();
    return AliasAnalysis::Location(II->getArgOperand(1), Len);
  }
  }
}
/// getLocForRead - Return the location read by the specified "hasMemoryWrite"
/// instruction if any.
static AliasAnalysis::Location
getLocForRead(Instruction *Inst, AliasAnalysis &AA) {
  assert(hasMemoryWrite(Inst) && "Unknown instruction case");

  // The only instructions that both read and write are the mem transfer
  // instructions (memcpy/memmove).
  if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(Inst))
    return AA.getLocationForSource(MTI);
  return AliasAnalysis::Location();
}
/// isRemovable - If the value of this instruction and the memory it writes to
/// is unused, may we delete this instruction?
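/// For instance (illustrative), a volatile store:
///   volatile store i32 0, i32* %p
/// can never be removed, even if %p is otherwise unused, because volatile
/// accesses have side effects beyond the value left in memory.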
static bool isRemovable(Instruction *I) {
  // Don't remove volatile stores.
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return !SI->isVolatile();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(0 && "doesn't pass 'hasMemoryWrite' predicate");
  case Intrinsic::lifetime_end:
    // Never remove dead lifetime_end's, e.g. because it is followed by a
    // free.
    return false;
  case Intrinsic::init_trampoline:
    // Always safe to remove init_trampoline.
    return true;

  case Intrinsic::memset:
  case Intrinsic::memmove:
  case Intrinsic::memcpy:
    // Don't remove volatile memory intrinsics.
    return !cast<MemIntrinsic>(II)->isVolatile();
  }
}
/// getStoredPointerOperand - Return the pointer that is being written to.
static Value *getStoredPointerOperand(Instruction *I) {
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperand();
  if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(I))
    return MI->getDest();

  IntrinsicInst *II = cast<IntrinsicInst>(I);
  switch (II->getIntrinsicID()) {
  default: assert(false && "Unexpected intrinsic!");
  case Intrinsic::init_trampoline:
    return II->getArgOperand(0);
  }
}
static uint64_t getPointerSize(Value *V, AliasAnalysis &AA) {
  const TargetData *TD = AA.getTargetData();
  if (TD == 0)
    return AliasAnalysis::UnknownSize;

  if (AllocaInst *A = dyn_cast<AllocaInst>(V)) {
    // Get size information for the alloca.
    if (ConstantInt *C = dyn_cast<ConstantInt>(A->getArraySize()))
      return C->getZExtValue() * TD->getTypeAllocSize(A->getAllocatedType());
    return AliasAnalysis::UnknownSize;
  }

  assert(isa<Argument>(V) && "Expected AllocaInst or Argument!");
  const PointerType *PT = cast<PointerType>(V->getType());
  return TD->getTypeAllocSize(PT->getElementType());
}
/// isObjectPointerWithTrustworthySize - Return true if the specified Value* is
/// pointing to an object with a pointer size we can trust.
static bool isObjectPointerWithTrustworthySize(const Value *V) {
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
    return !AI->isArrayAllocation();
  if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return !GV->mayBeOverridden();
  if (const Argument *A = dyn_cast<Argument>(V))
    return A->hasByValAttr();
  return false;
}
/// isCompleteOverwrite - Return true if a store to the 'Later' location
/// completely overwrites a store to the 'Earlier' location.
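/// For example (illustrative), assuming target data is available:
///   store i16 1, i16* %p   ; Earlier: writes 2 bytes
///   store i32 0, i32* %q   ; Later: writes 4 bytes
/// If %p and %q reduce to the same base pointer and the earlier 2-byte range
/// falls entirely inside the later 4-byte range, the earlier store is
/// completely overwritten.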
static bool isCompleteOverwrite(const AliasAnalysis::Location &Later,
                                const AliasAnalysis::Location &Earlier,
                                AliasAnalysis &AA) {
  const Value *P1 = Earlier.Ptr->stripPointerCasts();
  const Value *P2 = Later.Ptr->stripPointerCasts();

  // If the start pointers are the same, we just have to compare sizes to see if
  // the later store was larger than the earlier store.
  if (P1 == P2) {
    // If we don't know the sizes of either access, then we can't do a
    // comparison.
    if (Later.Size == AliasAnalysis::UnknownSize ||
        Earlier.Size == AliasAnalysis::UnknownSize) {
      // If we have no TargetData information around, then the size of the store
      // is inferable from the pointee type.  If they are the same type, then
      // we know that the store is safe.
      if (AA.getTargetData() == 0)
        return Later.Ptr->getType() == Earlier.Ptr->getType();
      return false;
    }

    // Make sure that the Later size is >= the Earlier size.
    if (Later.Size < Earlier.Size)
      return false;
    return true;
  }

  // Otherwise, we have to have size information, and the later store has to be
  // larger than the earlier one.
  if (Later.Size == AliasAnalysis::UnknownSize ||
      Earlier.Size == AliasAnalysis::UnknownSize ||
      Later.Size <= Earlier.Size || AA.getTargetData() == 0)
    return false;

  // Check to see if the later store is to the entire object (either a global,
  // an alloca, or a byval argument).  If so, then it clearly overwrites any
  // other store to the same object.
  const TargetData &TD = *AA.getTargetData();

  const Value *UO1 = GetUnderlyingObject(P1, &TD),
              *UO2 = GetUnderlyingObject(P2, &TD);

  // If we can't resolve the same pointers to the same object, then we can't
  // analyze them at all.
  if (UO1 != UO2)
    return false;

  // If the "Later" store is to a recognizable object, get its size.
  if (isObjectPointerWithTrustworthySize(UO2)) {
    uint64_t ObjectSize =
      TD.getTypeAllocSize(cast<PointerType>(UO2->getType())->getElementType());
    if (ObjectSize == Later.Size)
      return true;
  }

  // Okay, we have stores to two completely different pointers.  Try to
  // decompose the pointer into a "base + constant_offset" form.  If the base
  // pointers are equal, then we can reason about the two stores.
  int64_t EarlierOff = 0, LaterOff = 0;
  const Value *BP1 = GetPointerBaseWithConstantOffset(P1, EarlierOff, TD);
  const Value *BP2 = GetPointerBaseWithConstantOffset(P2, LaterOff, TD);

  // If the base pointers still differ, we have two completely different stores.
  if (BP1 != BP2)
    return false;

  // The later store completely overlaps the earlier store if:
  //
  // 1. Both start at the same offset and the later one's size is greater than
  //    or equal to the earlier one's, or
  //
  //      |--earlier--|
  //      |--   later   --|
  //
  // 2. The earlier store has an offset greater than the later offset, but which
  //    still lies completely within the later store.
  //
  //        |--earlier--|
  //    |-----  later  ------|
  //
  // We have to be careful here as *Off is signed while *.Size is unsigned.
  if (EarlierOff >= LaterOff &&
      uint64_t(EarlierOff - LaterOff) + Earlier.Size <= Later.Size)
    return true;

  // Otherwise, they don't completely overlap.
  return false;
}
/// isPossibleSelfRead - If 'Inst' might be a self read (i.e. a noop copy of a
/// memory region into an identical pointer) then it doesn't actually make its
/// input dead in the traditional sense.  Consider this case:
///
///   memcpy(A <- B)
///   memcpy(A <- A)
///
/// In this case, the second store to A does not make the first store to A dead.
/// The usual situation isn't an explicit A<-A store like this (which can be
/// trivially removed) but a case where two pointers may alias.
///
/// This function detects when it is unsafe to remove a dependent instruction
/// because the DSE inducing instruction may be a self-read.
static bool isPossibleSelfRead(Instruction *Inst,
                               const AliasAnalysis::Location &InstStoreLoc,
                               Instruction *DepWrite, AliasAnalysis &AA) {
  // Self reads can only happen for instructions that read memory.  Get the
  // location read.
  AliasAnalysis::Location InstReadLoc = getLocForRead(Inst, AA);
  if (InstReadLoc.Ptr == 0) return false;  // Not a reading instruction.

  // If the read and written loc obviously don't alias, it isn't a read.
  if (AA.isNoAlias(InstReadLoc, InstStoreLoc)) return false;

  // Okay, 'Inst' may copy over itself.  However, we can still remove the
  // DepWrite instruction if we can prove that it reads from the same location
  // as Inst.  This handles useful cases like:
  //   memcpy(A <- B)
  //   memcpy(A <- B)
  // Here we don't know if A/B may alias, but we do know that B/B are must
  // aliases, so removing the first memcpy is safe (assuming it writes <= #
  // bytes as the second one).
  AliasAnalysis::Location DepReadLoc = getLocForRead(DepWrite, AA);

  if (DepReadLoc.Ptr && AA.isMustAlias(InstReadLoc.Ptr, DepReadLoc.Ptr))
    return false;

  // If DepWrite doesn't read memory or if we can't prove it is a must alias,
  // then it can't be considered dead.
  return true;
}
//===----------------------------------------------------------------------===//
// DSE Pass
//===----------------------------------------------------------------------===//
bool DSE::runOnBasicBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Do a top-down walk on the BB.
  for (BasicBlock::iterator BBI = BB.begin(), BBE = BB.end(); BBI != BBE; ) {
    Instruction *Inst = BBI++;

    // Handle 'free' calls specially.
    if (CallInst *F = isFreeCall(Inst)) {
      MadeChange |= HandleFree(F);
      continue;
    }

    // If we find something that writes memory, get its memory dependence.
    if (!hasMemoryWrite(Inst))
      continue;

    MemDepResult InstDep = MD->getDependency(Inst);

    // Ignore any store where we can't find a local dependence.
    // FIXME: cross-block DSE would be fun. :)
    if (InstDep.isNonLocal() || InstDep.isUnknown())
      continue;

    // If we're storing the same value back to a pointer that we just
    // loaded from, then the store can be removed.
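    // For example (illustrative):
    //   %v = load i32* %p
    //   store i32 %v, i32* %p   ; removable: it stores the value just loaded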
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      if (LoadInst *DepLoad = dyn_cast<LoadInst>(InstDep.getInst())) {
        if (SI->getPointerOperand() == DepLoad->getPointerOperand() &&
            SI->getOperand(0) == DepLoad && !SI->isVolatile()) {
          DEBUG(dbgs() << "DSE: Remove Store Of Load from same pointer:\n  "
                       << "LOAD: " << *DepLoad << "\n  STORE: " << *SI << '\n');

          // DeleteDeadInstruction can delete the current instruction.  Save BBI
          // in case we need it.
          WeakVH NextInst(BBI);

          DeleteDeadInstruction(SI, *MD);

          if (NextInst == 0)  // Next instruction deleted.
            BBI = BB.begin();
          else if (BBI != BB.begin())  // Revisit this instruction if possible.
            --BBI;
          ++NumFastStores;
          MadeChange = true;
          continue;
        }
      }
    }

    // Figure out what location is being stored to.
    AliasAnalysis::Location Loc = getLocForWrite(Inst, *AA);

    // If we didn't get a useful location, fail.
    if (Loc.Ptr == 0)
      continue;

    while (!InstDep.isNonLocal() && !InstDep.isUnknown()) {
      // Get the memory clobbered by the instruction we depend on.  MemDep will
      // skip any instructions that 'Loc' clearly doesn't interact with.  If we
      // end up depending on a may- or must-aliased load, then we can't optimize
      // away the store and we bail out.  However, if we depend on something
      // that overwrites the memory location we *can* potentially optimize it.
      //
      // Find out what memory location the dependent instruction stores.
      Instruction *DepWrite = InstDep.getInst();
      AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA);
      // If we didn't get a useful location, or if it isn't a size, bail out.
      if (DepLoc.Ptr == 0)
        break;

      // If we find a write that is a) removable (i.e., non-volatile), b) is
      // completely obliterated by the store to 'Loc', and c) which we know that
      // 'Inst' doesn't load from, then we can remove it.
      if (isRemovable(DepWrite) && isCompleteOverwrite(Loc, DepLoc, *AA) &&
          !isPossibleSelfRead(Inst, Loc, DepWrite, *AA)) {
        DEBUG(dbgs() << "DSE: Remove Dead Store:\n  DEAD: "
              << *DepWrite << "\n  KILLER: " << *Inst << '\n');

        // Delete the store and now-dead instructions that feed it.
        DeleteDeadInstruction(DepWrite, *MD);
        ++NumFastStores;
        MadeChange = true;

        // DeleteDeadInstruction can delete the current instruction in loop
        // cases, reset BBI.
        BBI = Inst;
        if (BBI != BB.begin())
          --BBI;
        break;
      }

      // If this is a may-aliased store that is clobbering the store value, we
      // can keep searching past it for another must-aliased pointer that stores
      // to the same location.  For example, in:
      //   store -> P
      //   store -> Q
      //   store -> P
      // we can remove the first store to P even though we don't know if P and Q
      // alias.
      if (DepWrite == &BB.front()) break;

      // Can't look past this instruction if it might read 'Loc'.
      if (AA->getModRefInfo(DepWrite, Loc) & AliasAnalysis::Ref)
        break;

      InstDep = MD->getPointerDependencyFrom(Loc, false, DepWrite, &BB);
    }
  }

  // If this block ends in a return, unwind, or unreachable, all allocas are
  // dead at its end, which means stores to them are also dead.
  if (BB.getTerminator()->getNumSuccessors() == 0)
    MadeChange |= handleEndBlock(BB);

  return MadeChange;
}
/// HandleFree - Handle frees of entire structures whose dependency is a store
/// to a field of that structure.
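/// For example (illustrative):
///   store i32 0, i32* %field   ; %field points into the object %s
///   call void @free(i8* %s)    ; makes the store above dead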
bool DSE::HandleFree(CallInst *F) {
  bool MadeChange = false;

  MemDepResult Dep = MD->getDependency(F);
  while (!Dep.isNonLocal() && !Dep.isUnknown()) {
    Instruction *Dependency = Dep.getInst();
    if (!hasMemoryWrite(Dependency) || !isRemovable(Dependency))
      return MadeChange;

    Value *DepPointer =
      GetUnderlyingObject(getStoredPointerOperand(Dependency));

    // Check for aliasing.
    if (!AA->isMustAlias(F->getArgOperand(0), DepPointer))
      return MadeChange;

    // DCE instructions only used to calculate that store.
    DeleteDeadInstruction(Dependency, *MD);
    ++NumFastStores;
    MadeChange = true;

    // Inst's old Dependency is now deleted.  Compute the next dependency,
    // which may also be dead, as in
    //    s[0] = 0;
    //    s[1] = 0;  // This has just been deleted.
    //    free(s);
    Dep = MD->getDependency(F);
  }

  return MadeChange;
}
/// handleEndBlock - Remove dead stores to stack-allocated locations in the
/// function end block.  Ex:
/// %A = alloca i32
/// ...
/// store i32 1, i32* %A
/// ret void
bool DSE::handleEndBlock(BasicBlock &BB) {
  bool MadeChange = false;

  // Keep track of all of the stack objects that are dead at the end of the
  // block.
  SmallPtrSet<Value*, 16> DeadStackObjects;

  // Find all of the alloca'd pointers in the entry block.
  BasicBlock *Entry = BB.getParent()->begin();
  for (BasicBlock::iterator I = Entry->begin(), E = Entry->end(); I != E; ++I)
    if (AllocaInst *AI = dyn_cast<AllocaInst>(I))
      DeadStackObjects.insert(AI);

  // Treat byval arguments the same, stores to them are dead at the end of the
  // function.
  for (Function::arg_iterator AI = BB.getParent()->arg_begin(),
       AE = BB.getParent()->arg_end(); AI != AE; ++AI)
    if (AI->hasByValAttr())
      DeadStackObjects.insert(AI);

  // Scan the basic block backwards.
  for (BasicBlock::iterator BBI = BB.end(); BBI != BB.begin(); ){
    --BBI;

    // If we find a store, check to see if it points into a dead stack value.
    if (hasMemoryWrite(BBI) && isRemovable(BBI)) {
      // See through pointer-to-pointer bitcasts.
      Value *Pointer = GetUnderlyingObject(getStoredPointerOperand(BBI));

      // Stores to stack values are valid candidates for removal.
      if (DeadStackObjects.count(Pointer)) {
        Instruction *Dead = BBI++;

        DEBUG(dbgs() << "DSE: Dead Store at End of Block:\n  DEAD: "
                     << *Dead << "\n  Object: " << *Pointer << '\n');

        // DCE instructions only used to calculate that store.
        DeleteDeadInstruction(Dead, *MD, &DeadStackObjects);
        ++NumFastStores;
        MadeChange = true;
        continue;
      }
    }

    // Remove any dead non-memory-mutating instructions.
    if (isInstructionTriviallyDead(BBI)) {
      Instruction *Inst = BBI++;
      DeleteDeadInstruction(Inst, *MD, &DeadStackObjects);
      ++NumFastOther;
      MadeChange = true;
      continue;
    }

    if (AllocaInst *A = dyn_cast<AllocaInst>(BBI)) {
      DeadStackObjects.erase(A);
      continue;
    }

    if (CallSite CS = cast<Value>(BBI)) {
      // If this call does not access memory, it can't be loading any of our
      // pointers.
      if (AA->doesNotAccessMemory(CS))
        continue;

      // If the call might load from any of our allocas, then any store above
      // the call is live.
      SmallVector<Value*, 8> LiveAllocas;
      for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
           E = DeadStackObjects.end(); I != E; ++I) {
        // See if the call site touches it.
        AliasAnalysis::ModRefResult A =
          AA->getModRefInfo(CS, *I, getPointerSize(*I, *AA));

        if (A == AliasAnalysis::ModRef || A == AliasAnalysis::Ref)
          LiveAllocas.push_back(*I);
      }

      for (SmallVector<Value*, 8>::iterator I = LiveAllocas.begin(),
           E = LiveAllocas.end(); I != E; ++I)
        DeadStackObjects.erase(*I);

      // If all of the allocas were clobbered by the call then we're not going
      // to find anything else to process.
      if (DeadStackObjects.empty())
        return MadeChange;

      continue;
    }

    AliasAnalysis::Location LoadedLoc;

    // If we encounter a use of the pointer, it is no longer considered dead.
    if (LoadInst *L = dyn_cast<LoadInst>(BBI)) {
      LoadedLoc = AA->getLocation(L);
    } else if (VAArgInst *V = dyn_cast<VAArgInst>(BBI)) {
      LoadedLoc = AA->getLocation(V);
    } else if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(BBI)) {
      LoadedLoc = AA->getLocationForSource(MTI);
    } else {
      // Not a loading instruction.
      continue;
    }

    // Remove any allocas from the DeadPointer set that are loaded, as this
    // makes any stores above the access live.
    RemoveAccessedObjects(LoadedLoc, DeadStackObjects);

    // If all of the allocas were clobbered by the access then we're not going
    // to find anything else to process.
    if (DeadStackObjects.empty())
      break;
  }

  return MadeChange;
}
/// RemoveAccessedObjects - Check to see if the specified location may alias any
/// of the stack objects in the DeadStackObjects set.  If so, they become live
/// because the location is being loaded.
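/// For example (illustrative): if %a is an alloca in DeadStackObjects and we
/// see
///   %v = load i32* %q
/// where %q may alias %a, then %a is removed from the set, since a store to
/// %a above this load is observable.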
void DSE::RemoveAccessedObjects(const AliasAnalysis::Location &LoadedLoc,
                                SmallPtrSet<Value*, 16> &DeadStackObjects) {
  const Value *UnderlyingPointer = GetUnderlyingObject(LoadedLoc.Ptr);

  // A constant can't be in the dead pointer set.
  if (isa<Constant>(UnderlyingPointer))
    return;

  // If the kill pointer can be easily reduced to an alloca, don't bother doing
  // extraneous AA queries.
  if (isa<AllocaInst>(UnderlyingPointer) || isa<Argument>(UnderlyingPointer)) {
    DeadStackObjects.erase(const_cast<Value*>(UnderlyingPointer));
    return;
  }

  SmallVector<Value*, 16> NowLive;
  for (SmallPtrSet<Value*, 16>::iterator I = DeadStackObjects.begin(),
       E = DeadStackObjects.end(); I != E; ++I) {
    // See if the loaded location could alias the stack location.
    AliasAnalysis::Location StackLoc(*I, getPointerSize(*I, *AA));
    if (!AA->isNoAlias(StackLoc, LoadedLoc))
      NowLive.push_back(*I);
  }

  for (SmallVector<Value*, 16>::iterator I = NowLive.begin(), E = NowLive.end();
       I != E; ++I)
    DeadStackObjects.erase(*I);
}