//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(),
                                        IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }

  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
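  // For example, "%p = alloca i32, i32 4" becomes (roughly, names illustrative):
  //   %arr   = alloca [4 x i32]
  //   %p.sub = getelementptr inbounds [4 x i32]* %arr, i32 0, i32 0
  // and all uses of %p are rewritten to use %p.sub.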
  if (AI.isArrayAllocation()) {  // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());

      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;

      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2];
      Idx[0] = NullIdx;
      Idx[1] = NullIdx;
      Instruction *GEP =
        GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                          New->getName()+".sub");
      InsertNewInstBefore(GEP, *It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, GEP);
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }
  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
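    // For example, "%t = alloca {}" or "%t = alloca [0 x i32]" occupies zero
    // bytes, so the alloca is replaced by a null pointer of the same type.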
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));

    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
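/// For example (assuming TargetData is available and the sizes match), a load
/// of i8* through a cast of an i32** can load the original pointer and cast
/// the loaded value instead:
///   %p = load i8** bitcast (i32** @G to i8**)
/// becomes
///   %q = load i32** @G
///   %p = bitcast i32* %q to i8*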
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);

  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {

    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();

    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }

      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // casted to a pointer, this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
            IC.getTargetData()->getTypeSizeInBits(DestPTy)) {

        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);

  // Attempt to improve the alignment.
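  // For example, a "load i32* %p" with no explicit alignment is treated as
  // having the i32 ABI alignment (typically 4); if %p can be proven to be
  // 8-byte aligned, the load is upgraded to "align 8" instead.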
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()), TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }
  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;

  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
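  // For example:
  //   store i32 %x, i32* %P
  //   %a = add i32 %y, 1
  //   %v = load i32* %P      ; %v is replaced by %x
  // The scan is limited to a few preceding instructions, so only short
  // distances are covered here.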
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);

  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }

  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;
  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    //
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2)) --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }

      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }

      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
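/// For example, on a target where pointers are 32 bits wide (names are
/// illustrative), "store i32 %x, i32* bitcast (float** %P to i32*)" becomes
///   %x.c = inttoptr i32 %x to float*
///   store float* %x.c, float** %P
/// so the store is typed the same way as the underlying memory object.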
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);

  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;

  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
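  /// Here the bitcast is replaced by "getelementptr {foo*, float}* %P, i32 0,
  /// i32 0", which points at the leading foo* field without changing the
  /// address, so the store can then be retyped against that field.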
  SmallVector<Value*, 4> NewGEPIndices;

  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);

    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }

  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;

  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
        IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;

  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type *CastSrcTy = SIOp0->getType();
  const Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }

  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());

  Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                          SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value. This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<CastInst>(A) ||
      isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);

  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr))
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
        }
      }
    }
  }
  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }

  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
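  // For example, when a bitfield is updated twice in a row:
  //   store i32 %a, i32* %P
  //   %b = or i32 %a, 16
  //   store i32 %b, i32* %P     ; the first store is dead and is removed here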
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }

    // If this is a load, we have to stop.  However, if the loaded value is
    // from the pointer we're storing to and is the value being stored, then
    // *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);

      // Otherwise, this is a load from some other location.  Stores before it
      // are not dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }

  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }

  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);
  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;

  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
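/// For the first form the successor block ends up looking roughly like
/// (block names are illustrative):
///   %storemerge = phi i32 [ %v1, %if.then ], [ %v2, %if.else ]
///   store i32 %storemerge, i32* %P
/// with both of the original stores removed.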
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();

  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;

  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;

  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop)
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;
  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;

  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case.  There is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or if the
    // alignments differ, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        OtherStore->getAlignment() != SI.getAlignment())
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;

    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            OtherStore->getAlignment() != SI.getAlignment())
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }

    // In order to eliminate the store in OtherBr, we have to
    // make sure nothing reads or overwrites the stored value in
    // StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }
  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }

  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  StoreInst *NewSI = new StoreInst(MergedVal, SI.getOperand(1),
                                   OtherStore->isVolatile(),
                                   SI.getAlignment());
  InsertNewInstBefore(NewSI, *BBI);
  NewSI->setDebugLoc(OtherStore->getDebugLoc());

  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);
  return true;
}