//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memsets.
//
//===----------------------------------------------------------------------===//
#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;
STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with. This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0, etc. If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V) {
  LLVMContext &Context = V->getContext();

  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType()->isIntegerTy(8)) return V;
  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable". An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType()->isFloatTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType()->isDoubleTy())
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }
  // We can handle constant integers that are a power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2 = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }
  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}
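
// A worked illustration of the decomposition above (not part of the pass
// itself): for i32 0xF0F0F0F0 the loop halves the value twice,
// 0xF0F0F0F0 -> 0xF0F0 -> 0xF0, and the high/low halves match at every step,
// so isBytewiseValue returns i8 0xF0. For i16 0x1234 the first comparison
// (0x12 vs. 0x34) fails and the function returns null, so such a store is
// never turned into a memset.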
/// GetOffsetFromIndex - Compute the byte offset implied by GEP's operands,
/// starting at operand Idx. If a non-constant index is encountered, set
/// VariableIdxFound and give up.
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}
/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]. In this case the offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they handle some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}
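
// Illustrative example (hypothetical IR, not from this file; assumes 4-byte
// i32): given
//   %p1 = getelementptr [100 x i32]* %A, i32 0, i32 42
//   %p2 = getelementptr [100 x i32]* %A, i32 0, i32 40
// the two GEPs share the base %A and the leading zero index, so
// GetOffsetFromIndex yields 168 and 160 respectively, and
// IsPointerOffset(%p1, %p2, Offset, TD) returns true with Offset = -8.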
/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace
bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found more than 8 stores to merge or 64 bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32,
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}
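
// Worked examples of the heuristic above (illustrative only, assuming a
// 32-bit pointer size): four i8 stores covering 4 bytes give
// NumPointerStores = 1 and NumByteStores = 0, and 4 > 1, so the range is
// lowered to a memset. Three i16 stores covering 6 bytes give 1 + 2 = 3, and
// 3 > 3 is false, so they are left alone.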
namespace {

/// MemsetRanges - A collection of MemsetRange objects, kept sorted by their
/// start offset.
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};

} // end anon namespace
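
// Minimal usage sketch (illustrative only; processStore below is the real
// driver, and the stores/offsets here are hypothetical):
//
//   MemsetRanges Ranges(*TD);
//   Ranges.addStore(0, FirstStore);
//   Ranges.addStore(4, SecondStore);
//   for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
//        I != E; ++I)
//     if (I->isProfitableToUseMemset(*TD))
//       /* emit one memset covering [I->Start, I->End) */;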
/// addStore - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list. We keep the ranges sorted for
  // simplicity here. This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
    I->Alignment = SI->getAlignment();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}
//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
    }

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }
    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDest, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}
// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)
/// processStore - When scanning forward over instructions, we look for some
/// other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;
  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse()) {
      MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

      MemDepResult dep = MD.getDependency(LI);
      CallInst *C = 0;
      if (dep.isClobber() && !isa<MemCpyInst>(dep.getInst()))
        C = dyn_cast<CallInst>(dep.getInst());

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD.removeInstruction(SI);
          SI->eraseFromParent();
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }
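
  // The pattern being matched above looks roughly like this (hypothetical IR,
  // shown only for illustration):
  //   define void @wrapper(%struct.S* sret %dst) {
  //     %tmp = alloca %struct.S
  //     call void @produce(%struct.S* sret %tmp)
  //     %val = load %struct.S* %tmp            ; load feeding the store below
  //     store %struct.S %val, %struct.S* %dst
  //     ret void
  //   }
  // If the call slot optimization succeeds, @produce writes into %dst
  // directly and the load/store pair is deleted.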
  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0));
  if (!ByteVal)
    return false;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store that can be splatable. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out. We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.
      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }
  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);
  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset. We put
    // the memset right before the first instruction that isn't part of this
    // memset block. This ensures that the memset is dominated by any addressing
    // instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }
    // Cast the start ptr to be i8* as memset requires.
    const PointerType *StartPTy = cast<PointerType>(StartPtr->getType());
    const PointerType *i8Ptr = Type::getInt8PtrTy(Context,
                                                  StartPTy->getAddressSpace());
    if (StartPTy != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,                                             // start, value
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start), // size
      ConstantInt::get(Type::getInt32Ty(Context), Alignment),       // align
      ConstantInt::get(Type::getInt1Ty(Context), 0),                 // volatile
    };
    const Type *Tys[] = { Ops[0]->getType(), Ops[2]->getType() };

    Function *MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys, 2);

    Value *C = CallInst::Create(MemSetF, Ops, Ops+5, "", InsertPt);
    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i];
          dbgs() << "With: " << *C);
    (void)C;  // Silence an unused-variable warning in release builds.
    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
           SI = Range.TheStores.begin(),
           SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}
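
// End-to-end illustration of the store merging above (hypothetical IR):
//   store i8 0, i8* %p
//   %p1 = getelementptr i8* %p, i64 1
//   store i8 0, i8* %p1
//   %p2 = getelementptr i8* %p, i64 2
//   store i8 0, i8* %p2
//   %p3 = getelementptr i8* %p, i64 3
//   store i8 0, i8* %p3
// becomes a single call such as:
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i32 1, i1 false)
// once the range [0, 4) is judged profitable by isProfitableToUseMemset.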
/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.
  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;
  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;
  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }
  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }
  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;
  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) !=
      AliasAnalysis::NoModRef)
    return false;
  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;
  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}
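
// A sketch of the transformation this routine performs (hypothetical IR):
//   %tmp = alloca %struct.S
//   call void @compute(%struct.S* sret %tmp)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %destCast, i8* %tmpCast,
//                                        i64 16, i32 4, i1 false)
// Because %tmp is only written by the call and read by the memcpy (and the
// other checks above hold), the call can be rewritten to target the memcpy's
// destination directly:
//   call void @compute(%struct.S* sret %dest)
// and the memcpy becomes dead, so the caller erases it.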
/// processMemCpy - Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // We can only optimize statically-sized memcpy's.
  ConstantInt *cpyLen = dyn_cast<ConstantInt>(M->getLength());
  if (!cpyLen) return false;

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst())) {
      bool changed = performCallSlotOptzn(M, M->getDest(), M->getSource(),
                                          cpyLen->getZExtValue(), C);
      if (changed) M->eraseFromParent();
      return changed;
    }
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the lengths of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;
  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;
  // If all checks passed, then we can transform these memcpy's.
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            MDep->getRawSource()->getType(),
                            M->getLength()->getType() };
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), ArgTys, 3);

  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignmentCst()->getZExtValue(),
                            M->getAlignmentCst()->getZExtValue());
  LLVMContext &Context = M->getContext();
  ConstantInt *AlignCI = ConstantInt::get(Type::getInt32Ty(Context), Align);
  Value *Args[5] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(),
    AlignCI, M->getVolatileCst()
  };
  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+5, "", M);
  // If C and M don't interfere, then this is a valid transformation. If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    ++NumMemCpyInstr;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}
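
// Sketch of the memcpy-memcpy forwarding (hypothetical IR, equal lengths):
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %Y, i8* %X, i64 32, i32 4, i1 false)
//   ...
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %Z, i8* %Y, i64 32, i32 4, i1 false)
// The second copy is rewritten to read from %X instead of %Y:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %Z, i8* %X, i64 32, i32 4, i1 false)
// which leaves the first memcpy exposed for dead store elimination if %Y has
// no other uses.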
/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = AliasAnalysis::UnknownSize;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction,
  // just conservatively flush it from the cache.
  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}
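
// For example (illustrative IR): memmove(P, P+64, 64) copies two regions that
// cannot overlap, so
//   call void @llvm.memmove.p0i8.p0i8.i64(i8* %P, i8* %P64, i64 64, i32 1, i1 false)
// is rewritten in place to call @llvm.memcpy.p0i8.p0i8.i64 with the same
// arguments, which later passes and codegen can handle more aggressively.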
// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        if (processMemMove(M)) {
          --BI; // Reprocess the new memcpy.
          MadeChange = true;
        }
      }
    }
  }

  return MadeChange;
}
// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  while (1) {
    if (!iterateOnFunction(F))