//===- InstCombineLoadStoreAlloca.cpp -------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for load, store and alloca.
//
//===----------------------------------------------------------------------===//
#include "InstCombine.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Analysis/Loads.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/ADT/Statistic.h"
using namespace llvm;
STATISTIC(NumDeadStore, "Number of dead stores eliminated");
Instruction *InstCombiner::visitAllocaInst(AllocaInst &AI) {
  // Ensure that the alloca array size argument has type intptr_t, so that
  // any casting is exposed early.
  if (TD) {
    const Type *IntPtrTy = TD->getIntPtrType(AI.getContext());
    if (AI.getArraySize()->getType() != IntPtrTy) {
      Value *V = Builder->CreateIntCast(AI.getArraySize(), IntPtrTy, false);
      AI.setOperand(0, V);
      return &AI;
    }
  }
  // Convert: alloca Ty, C - where C is a constant != 1 into: alloca [C x Ty], 1
  if (AI.isArrayAllocation()) {   // Check C != 1
    if (const ConstantInt *C = dyn_cast<ConstantInt>(AI.getArraySize())) {
      const Type *NewTy =
        ArrayType::get(AI.getAllocatedType(), C->getZExtValue());
      assert(isa<AllocaInst>(AI) && "Unknown type of allocation inst!");
      AllocaInst *New = Builder->CreateAlloca(NewTy, 0, AI.getName());
      New->setAlignment(AI.getAlignment());
      // Scan to the end of the allocation instructions, to skip over a block of
      // allocas if possible...also skip interleaved debug info
      BasicBlock::iterator It = New;
      while (isa<AllocaInst>(*It) || isa<DbgInfoIntrinsic>(*It)) ++It;
      // Now that It is pointing to the first non-allocation-inst in the block,
      // insert our getelementptr instruction...
      Value *NullIdx = Constant::getNullValue(Type::getInt32Ty(AI.getContext()));
      Value *Idx[2] = { NullIdx, NullIdx };
      Value *V = GetElementPtrInst::CreateInBounds(New, Idx, Idx + 2,
                                                   New->getName()+".sub", It);

      // Now make everything use the getelementptr instead of the original
      // allocation.
      return ReplaceInstUsesWith(AI, V);
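      // For example (illustrative), the rewrite above turns
      //   %a = alloca i32, i32 4
      // into
      //   %a = alloca [4 x i32]
      //   %a.sub = getelementptr inbounds [4 x i32]* %a, i32 0, i32 0
      // and all users of the old alloca are pointed at %a.sub instead.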
    } else if (isa<UndefValue>(AI.getArraySize())) {
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
    }
  }
  if (TD && isa<AllocaInst>(AI) && AI.getAllocatedType()->isSized()) {
    // If alloca'ing a zero byte object, replace the alloca with a null pointer.
    // Note that we only do this for alloca's, because malloc should allocate
    // and return a unique pointer, even for a zero byte allocation.
    if (TD->getTypeAllocSize(AI.getAllocatedType()) == 0)
      return ReplaceInstUsesWith(AI, Constant::getNullValue(AI.getType()));
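    // For instance (illustrative), '%e = alloca {}' or '%z = alloca [0 x i32]'
    // occupies no storage, so every use of it can be replaced by a null
    // pointer of the same type.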
    // If the alignment is 0 (unspecified), assign it the preferred alignment.
    if (AI.getAlignment() == 0)
      AI.setAlignment(TD->getPrefTypeAlignment(AI.getAllocatedType()));
  }

  return 0;
}
/// InstCombineLoadCast - Fold 'load (cast P)' -> 'cast (load P)' when possible.
static Instruction *InstCombineLoadCast(InstCombiner &IC, LoadInst &LI,
                                        const TargetData *TD) {
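  // Illustrative example (hypothetical names): with matching pointee sizes,
  //   %q = bitcast float* %p to i32*
  //   %v = load i32* %q
  // becomes
  //   %v.f = load float* %p
  //   %v = bitcast float %v.f to i32
  // so the load keeps the type of the underlying object.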
  User *CI = cast<User>(LI.getOperand(0));
  Value *CastOp = CI->getOperand(0);
  const PointerType *DestTy = cast<PointerType>(CI->getType());
  const Type *DestPTy = DestTy->getElementType();
  if (const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType())) {
    // If the address spaces don't match, don't eliminate the cast.
    if (DestTy->getAddressSpace() != SrcTy->getAddressSpace())
      return 0;

    const Type *SrcPTy = SrcTy->getElementType();
    if (DestPTy->isIntegerTy() || DestPTy->isPointerTy() ||
        DestPTy->isVectorTy()) {
      // If the source is an array, the code below will not succeed.  Check to
      // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
      // constants.
      if (const ArrayType *ASrcTy = dyn_cast<ArrayType>(SrcPTy))
        if (Constant *CSrc = dyn_cast<Constant>(CastOp))
          if (ASrcTy->getNumElements() != 0) {
            Value *Idxs[2];
            Idxs[0] = Constant::getNullValue(Type::getInt32Ty(LI.getContext()));
            Idxs[1] = Idxs[0];
            CastOp = ConstantExpr::getGetElementPtr(CSrc, Idxs, 2);
            SrcTy = cast<PointerType>(CastOp->getType());
            SrcPTy = SrcTy->getElementType();
          }
      if (IC.getTargetData() &&
          (SrcPTy->isIntegerTy() || SrcPTy->isPointerTy() ||
           SrcPTy->isVectorTy()) &&
          // Do not allow turning this into a load of an integer, which is then
          // cast to a pointer; this pessimizes pointer analysis a lot.
          (SrcPTy->isPointerTy() == LI.getType()->isPointerTy()) &&
          IC.getTargetData()->getTypeSizeInBits(SrcPTy) ==
          IC.getTargetData()->getTypeSizeInBits(DestPTy)) {
        // Okay, we are casting from one integer or pointer type to another of
        // the same size.  Instead of casting the pointer before the load, cast
        // the result of the loaded value.
        LoadInst *NewLoad =
          IC.Builder->CreateLoad(CastOp, LI.isVolatile(), CI->getName());
        NewLoad->setAlignment(LI.getAlignment());
        // Now cast the result of the load.
        return new BitCastInst(NewLoad, LI.getType());
      }
    }
  }
  return 0;
}
Instruction *InstCombiner::visitLoadInst(LoadInst &LI) {
  Value *Op = LI.getOperand(0);
  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Op, TD->getPrefTypeAlignment(LI.getType()), TD);
    unsigned LoadAlign = LI.getAlignment();
    unsigned EffectiveLoadAlign = LoadAlign != 0 ? LoadAlign :
      TD->getABITypeAlignment(LI.getType());

    if (KnownAlign > EffectiveLoadAlign)
      LI.setAlignment(KnownAlign);
    else if (LoadAlign == 0)
      LI.setAlignment(EffectiveLoadAlign);
  }
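  // For instance (illustrative), a load marked 'align 4' (or with no alignment
  // at all) from an alloca known to be 16-byte aligned can safely be bumped to
  // 'align 16' here.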
  // load (cast X) --> cast (load X) iff safe.
  if (isa<CastInst>(Op))
    if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
      return Res;

  // None of the following transforms are legal for volatile loads.
  if (LI.isVolatile()) return 0;
  // Do really simple store-to-load forwarding and load CSE, to catch cases
  // where there are several consecutive memory accesses to the same location,
  // separated by a few arithmetic operations.
  BasicBlock::iterator BBI = &LI;
  if (Value *AvailableVal = FindAvailableLoadedValue(Op, LI.getParent(), BBI, 6))
    return ReplaceInstUsesWith(LI, AvailableVal);
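  // Illustrative example: in a sequence like
  //   store i32 %v, i32* %p
  //   %x = add i32 %a, %b
  //   %y = load i32* %p
  // the load %y is simply replaced by %v (the scan is limited to a handful of
  // preceding instructions).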
  // load(gep null, ...) -> unreachable
  if (GetElementPtrInst *GEPI = dyn_cast<GetElementPtrInst>(Op)) {
    const Value *GEPI0 = GEPI->getOperand(0);
    // TODO: Consider a target hook for valid address spaces for this xform.
    if (isa<ConstantPointerNull>(GEPI0) && GEPI->getPointerAddressSpace() == 0) {
      // Insert a new store to null instruction before the load to indicate
      // that this code is not reachable.  We do this instead of inserting
      // an unreachable instruction directly because we cannot modify the
      // CFG.
      new StoreInst(UndefValue::get(LI.getType()),
                    Constant::getNullValue(Op->getType()), &LI);
      return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
    }
  }
  // load null/undef -> unreachable
  // TODO: Consider a target hook for valid address spaces for this xform.
  if (isa<UndefValue>(Op) ||
      (isa<ConstantPointerNull>(Op) && LI.getPointerAddressSpace() == 0)) {
    // Insert a new store to null instruction before the load to indicate that
    // this code is not reachable.  We do this instead of inserting an
    // unreachable instruction directly because we cannot modify the CFG.
    new StoreInst(UndefValue::get(LI.getType()),
                  Constant::getNullValue(Op->getType()), &LI);
    return ReplaceInstUsesWith(LI, UndefValue::get(LI.getType()));
  }
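  // Illustrative example: '%x = load i32* null' (in address space 0) or
  // '%x = load i32* undef' marks unreachable code; the marker store lets
  // SimplifyCFG turn the block into 'unreachable' later.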
  // Instcombine load (constantexpr_cast global) -> cast (load global)
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Op))
    if (CE->isCast())
      if (Instruction *Res = InstCombineLoadCast(*this, LI, TD))
        return Res;
  if (Op->hasOneUse()) {
    // Change select and PHI nodes to select values instead of addresses: this
    // helps alias analysis out a lot, allows many other simplifications, and
    // exposes redundancy in the code.
    //
    // Note that we cannot do the transformation unless we know that the
    // introduced loads cannot trap!  Something like this is valid as long as
    // the condition is always false: load (select bool %C, int* null, int* %G),
    // but it would not be valid if we transformed it to load from null
    // unconditionally.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op)) {
      // load (select (Cond, &V1, &V2))  --> select(Cond, load &V1, load &V2).
      unsigned Align = LI.getAlignment();
      if (isSafeToLoadUnconditionally(SI->getOperand(1), SI, Align, TD) &&
          isSafeToLoadUnconditionally(SI->getOperand(2), SI, Align, TD)) {
        LoadInst *V1 = Builder->CreateLoad(SI->getOperand(1),
                                           SI->getOperand(1)->getName()+".val");
        LoadInst *V2 = Builder->CreateLoad(SI->getOperand(2),
                                           SI->getOperand(2)->getName()+".val");
        V1->setAlignment(Align);
        V2->setAlignment(Align);
        return SelectInst::Create(SI->getCondition(), V1, V2);
      }
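      // Illustrative example: when both arms are safe to load from,
      //   %addr = select i1 %c, i32* %p, i32* %q
      //   %v = load i32* %addr
      // becomes
      //   %p.val = load i32* %p
      //   %q.val = load i32* %q
      //   %v = select i1 %c, i32 %p.val, i32 %q.val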
      // load (select (cond, null, P)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(1)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(2));
          return &LI;
        }
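      // Illustrative rationale: a well-defined load can never actually read
      // through the null arm, so e.g. 'load (select i1 %c, i32* null, i32* %p)'
      // can only observe memory at %p and is rewritten to load %p directly.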
      // load (select (cond, P, null)) -> load P
      if (Constant *C = dyn_cast<Constant>(SI->getOperand(2)))
        if (C->isNullValue()) {
          LI.setOperand(0, SI->getOperand(1));
          return &LI;
        }
    }
  }
  return 0;
}
/// InstCombineStoreToCast - Fold store V, (cast P) -> store (cast V), P
/// when possible.  This makes it generally easy to do alias analysis and/or
/// SROA/mem2reg of the memory object.
static Instruction *InstCombineStoreToCast(InstCombiner &IC, StoreInst &SI) {
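  // Illustrative example (hypothetical names): with matching 32-bit sizes,
  //   %q = bitcast float* %p to i32*
  //   store i32 %v, i32* %q
  // becomes
  //   %v.c = bitcast i32 %v to float
  //   store float %v.c, float* %p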
  User *CI = cast<User>(SI.getOperand(1));
  Value *CastOp = CI->getOperand(0);
  const Type *DestPTy = cast<PointerType>(CI->getType())->getElementType();
  const PointerType *SrcTy = dyn_cast<PointerType>(CastOp->getType());
  if (SrcTy == 0) return 0;

  const Type *SrcPTy = SrcTy->getElementType();

  if (!DestPTy->isIntegerTy() && !DestPTy->isPointerTy())
    return 0;
  /// NewGEPIndices - If SrcPTy is an aggregate type, we can emit a "noop gep"
  /// to its first element.  This allows us to handle things like:
  ///   store i32 xxx, (bitcast {foo*, float}* %P to i32*)
  SmallVector<Value*, 4> NewGEPIndices;
  // If the source is an array, the code below will not succeed.  Check to
  // see if a trivial 'gep P, 0, 0' will help matters.  Only do this for
  // constants.
  if (SrcPTy->isArrayTy() || SrcPTy->isStructTy()) {
    // Index through pointer.
    Constant *Zero = Constant::getNullValue(Type::getInt32Ty(SI.getContext()));
    NewGEPIndices.push_back(Zero);
    while (1) {
      if (const StructType *STy = dyn_cast<StructType>(SrcPTy)) {
        if (!STy->getNumElements()) /* Struct can be empty {} */
          break;
        NewGEPIndices.push_back(Zero);
        SrcPTy = STy->getElementType(0);
      } else if (const ArrayType *ATy = dyn_cast<ArrayType>(SrcPTy)) {
        NewGEPIndices.push_back(Zero);
        SrcPTy = ATy->getElementType();
      } else {
        break;
      }
    }

    SrcTy = PointerType::get(SrcPTy, SrcTy->getAddressSpace());
  }
  if (!SrcPTy->isIntegerTy() && !SrcPTy->isPointerTy())
    return 0;
  // If the pointers point into different address spaces or if they point to
  // values with different sizes, we can't do the transformation.
  if (!IC.getTargetData() ||
      SrcTy->getAddressSpace() !=
        cast<PointerType>(CI->getType())->getAddressSpace() ||
      IC.getTargetData()->getTypeSizeInBits(SrcPTy) !=
      IC.getTargetData()->getTypeSizeInBits(DestPTy))
    return 0;
  // Okay, we are casting from one integer or pointer type to another of
  // the same size.  Instead of casting the pointer before
  // the store, cast the value to be stored.
  Value *SIOp0 = SI.getOperand(0);
  Instruction::CastOps opcode = Instruction::BitCast;
  const Type *CastSrcTy = SIOp0->getType();
  const Type *CastDstTy = SrcPTy;
  if (CastDstTy->isPointerTy()) {
    if (CastSrcTy->isIntegerTy())
      opcode = Instruction::IntToPtr;
  } else if (CastDstTy->isIntegerTy()) {
    if (SIOp0->getType()->isPointerTy())
      opcode = Instruction::PtrToInt;
  }
  // SIOp0 is a pointer to aggregate and this is a store to the first field,
  // emit a GEP to index into its first field.
  if (!NewGEPIndices.empty())
    CastOp = IC.Builder->CreateInBoundsGEP(CastOp, NewGEPIndices.begin(),
                                           NewGEPIndices.end());
  Value *NewCast = IC.Builder->CreateCast(opcode, SIOp0, CastDstTy,
                                          SIOp0->getName()+".c");
  SI.setOperand(0, NewCast);
  SI.setOperand(1, CastOp);
  return &SI;
}
/// equivalentAddressValues - Test if A and B will obviously have the same
/// value.  This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool equivalentAddressValues(Value *A, Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B) return true;

  // Test if the values come from identical arithmetic instructions.
  // This uses isIdenticalToWhenDefined instead of isIdenticalTo because
  // it's only used to compare two uses within the same basic block, which
  // means that they'll always either have the same value or one of them
  // will have an undefined value.
  if (isa<BinaryOperator>(A) ||
      isa<GetElementPtrInst>(A))
    if (Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
Instruction *InstCombiner::visitStoreInst(StoreInst &SI) {
  Value *Val = SI.getOperand(0);
  Value *Ptr = SI.getOperand(1);
  // If the RHS is an alloca with a single use, zapify the store, making the
  // alloca dead.
  if (!SI.isVolatile()) {
    if (Ptr->hasOneUse()) {
      if (isa<AllocaInst>(Ptr))
        return EraseInstFromFunction(SI);
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Ptr)) {
        if (isa<AllocaInst>(GEP->getOperand(0))) {
          if (GEP->getOperand(0)->hasOneUse())
            return EraseInstFromFunction(SI);
        }
      }
    }
  }
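  // Illustrative example: if '%a = alloca i32' (or a GEP into it) has this
  // store as its only use, nothing can ever read the stored value, so the
  // store itself is deleted.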
  // Attempt to improve the alignment.
  if (TD) {
    unsigned KnownAlign =
      getOrEnforceKnownAlignment(Ptr, TD->getPrefTypeAlignment(Val->getType()),
                                 TD);
    unsigned StoreAlign = SI.getAlignment();
    unsigned EffectiveStoreAlign = StoreAlign != 0 ? StoreAlign :
      TD->getABITypeAlignment(Val->getType());

    if (KnownAlign > EffectiveStoreAlign)
      SI.setAlignment(KnownAlign);
    else if (StoreAlign == 0)
      SI.setAlignment(EffectiveStoreAlign);
  }
  // Do really simple DSE, to catch cases where there are several consecutive
  // stores to the same location, separated by a few arithmetic operations.
  // This situation often occurs with bitfield accesses.
  BasicBlock::iterator BBI = &SI;
  for (unsigned ScanInsts = 6; BBI != SI.getParent()->begin() && ScanInsts;
       --ScanInsts) {
    --BBI;
    // Don't count debug info directives, lest they affect codegen,
    // and we skip pointer-to-pointer bitcasts, which are NOPs.
    if (isa<DbgInfoIntrinsic>(BBI) ||
        (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      ScanInsts++;
      continue;
    }

    if (StoreInst *PrevSI = dyn_cast<StoreInst>(BBI)) {
      // Prev store isn't volatile, and stores to the same location?
      if (!PrevSI->isVolatile() && equivalentAddressValues(PrevSI->getOperand(1),
                                                           SI.getOperand(1))) {
        ++NumDeadStore;
        ++BBI;
        EraseInstFromFunction(*PrevSI);
        continue;
      }
      break;
    }
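    // Illustrative example: in
    //   store i32 1, i32* %p
    //   %t = add i32 %a, %b
    //   store i32 2, i32* %p
    // the first store is dead and is removed here (the scan looks back only a
    // few instructions).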
    // If this is a load, we have to stop.  However, if the loaded value is
    // loaded from the pointer we're storing to and is the value being stored,
    // then *this* store is dead (X = load P; store X -> P).
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      if (LI == Val && equivalentAddressValues(LI->getOperand(0), Ptr) &&
          !SI.isVolatile())
        return EraseInstFromFunction(SI);
      // Otherwise, this is a load from some other location.  Stores before it
      // are not dead.
      break;
    }

    // Don't skip over loads or things that can modify memory.
    if (BBI->mayWriteToMemory() || BBI->mayReadFromMemory())
      break;
  }
  if (SI.isVolatile()) return 0;  // Don't hack volatile stores.
  // store X, null    -> turns into 'unreachable' in SimplifyCFG
  if (isa<ConstantPointerNull>(Ptr) && SI.getPointerAddressSpace() == 0) {
    if (!isa<UndefValue>(Val)) {
      SI.setOperand(0, UndefValue::get(Val->getType()));
      if (Instruction *U = dyn_cast<Instruction>(Val))
        Worklist.Add(U);  // Dropped a use.
    }
    return 0;  // Do not modify these!
  }
  // store undef, Ptr -> noop
  if (isa<UndefValue>(Val))
    return EraseInstFromFunction(SI);
  // If the pointer destination is a cast, see if we can fold the cast into the
  // source instead.
  if (isa<CastInst>(Ptr))
    if (Instruction *Res = InstCombineStoreToCast(*this, SI))
      return Res;
  if (ConstantExpr *CE = dyn_cast<ConstantExpr>(Ptr))
    if (CE->isCast())
      if (Instruction *Res = InstCombineStoreToCast(*this, SI))
        return Res;
  // If this store is the last instruction in the basic block (possibly
  // excepting debug info instructions), and if the block ends with an
  // unconditional branch, try to move it to the successor block.
  BBI = &SI;
  do {
    ++BBI;
  } while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy()));
  if (BranchInst *BI = dyn_cast<BranchInst>(BBI))
    if (BI->isUnconditional())
      if (SimplifyStoreAtEndOfBlock(SI))
        return 0;  // xform done!

  return 0;
}
/// SimplifyStoreAtEndOfBlock - Turn things like:
///   if () { *P = v1; } else { *P = v2 }
/// into a phi node with a store in the successor.
///
/// Simplify things like:
///   *P = v1; if () { *P = v2; }
/// into a phi node with a store in the successor.
///
bool InstCombiner::SimplifyStoreAtEndOfBlock(StoreInst &SI) {
  BasicBlock *StoreBB = SI.getParent();
  // Check to see if the successor block has exactly two incoming edges.  If
  // so, see if the other predecessor contains a store to the same location.
  // If so, insert a PHI node (if needed) and move the stores down.
  BasicBlock *DestBB = StoreBB->getTerminator()->getSuccessor(0);

  // Determine whether Dest has exactly two predecessors and, if so, compute
  // the other predecessor.
  pred_iterator PI = pred_begin(DestBB);
  BasicBlock *P = *PI;
  BasicBlock *OtherBB = 0;
  if (P != StoreBB)
    OtherBB = P;

  if (++PI == pred_end(DestBB))
    return false;

  P = *PI;
  if (P != StoreBB) {
    if (OtherBB)
      return false;
    OtherBB = P;
  }
  if (++PI != pred_end(DestBB))
    return false;
  // Bail out if all the relevant blocks aren't distinct (this can happen,
  // for example, if SI is in an infinite loop).
  if (StoreBB == DestBB || OtherBB == DestBB)
    return false;
  // Verify that the other block ends in a branch and is not otherwise empty.
  BasicBlock::iterator BBI = OtherBB->getTerminator();
  BranchInst *OtherBr = dyn_cast<BranchInst>(BBI);
  if (!OtherBr || BBI == OtherBB->begin())
    return false;
  // If the other block ends in an unconditional branch, check for the 'if then
  // else' case: there is an instruction before the branch.
  StoreInst *OtherStore = 0;
  if (OtherBr->isUnconditional()) {
    --BBI;
    // Skip over debugging info.
    while (isa<DbgInfoIntrinsic>(BBI) ||
           (isa<BitCastInst>(BBI) && BBI->getType()->isPointerTy())) {
      if (BBI == OtherBB->begin())
        return false;
      --BBI;
    }
    // If this isn't a store, isn't a store to the same location, or if the
    // alignments differ, bail out.
    OtherStore = dyn_cast<StoreInst>(BBI);
    if (!OtherStore || OtherStore->getOperand(1) != SI.getOperand(1) ||
        OtherStore->getAlignment() != SI.getAlignment())
      return false;
  } else {
    // Otherwise, the other block ended with a conditional branch. If one of the
    // destinations is StoreBB, then we have the if/then case.
    if (OtherBr->getSuccessor(0) != StoreBB &&
        OtherBr->getSuccessor(1) != StoreBB)
      return false;
    // Okay, we know that OtherBr now goes to Dest and StoreBB, so this is an
    // if/then triangle.  See if there is a store to the same ptr as SI that
    // lives in OtherBB.
    for (;; --BBI) {
      // Check to see if we find the matching store.
      if ((OtherStore = dyn_cast<StoreInst>(BBI))) {
        if (OtherStore->getOperand(1) != SI.getOperand(1) ||
            OtherStore->getAlignment() != SI.getAlignment())
          return false;
        break;
      }
      // If we find something that may be using or overwriting the stored
      // value, or if we run out of instructions, we can't do the xform.
      if (BBI->mayReadFromMemory() || BBI->mayWriteToMemory() ||
          BBI == OtherBB->begin())
        return false;
    }
    // In order to eliminate the store in OtherBB, we have to make sure nothing
    // reads or overwrites the stored value in StoreBB.
    for (BasicBlock::iterator I = StoreBB->begin(); &*I != &SI; ++I) {
      // FIXME: This should really be AA driven.
      if (I->mayReadFromMemory() || I->mayWriteToMemory())
        return false;
    }
  }
  // Insert a PHI node now if we need it.
  Value *MergedVal = OtherStore->getOperand(0);
  if (MergedVal != SI.getOperand(0)) {
    PHINode *PN = PHINode::Create(MergedVal->getType(), 2, "storemerge");
    PN->addIncoming(SI.getOperand(0), SI.getParent());
    PN->addIncoming(OtherStore->getOperand(0), OtherBB);
    MergedVal = InsertNewInstBefore(PN, DestBB->front());
  }
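  // Illustrative result: the two stored values are merged in DestBB as e.g.
  //   %storemerge = phi i32 [ %v1, %StoreBB ], [ %v2, %OtherBB ]
  // and a single store of %storemerge replaces the two original stores.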
  // Advance to a place where it is safe to insert the new store and
  // insert it.
  BBI = DestBB->getFirstNonPHI();
  InsertNewInstBefore(new StoreInst(MergedVal, SI.getOperand(1),
                                    OtherStore->isVolatile(),
                                    SI.getAlignment()), *BBI);
  // Nuke the old stores.
  EraseInstFromFunction(SI);
  EraseInstFromFunction(*OtherStore);

  return true;
}