//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/GlobalVariable.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/IRBuilder.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include "llvm/Target/TargetLibraryInfo.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

/// GetOffsetFromIndex - Compute the constant byte offset implied by the GEP
/// indices starting at operand Idx. Sets VariableIdxFound if any index is not
/// a ConstantInt.
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, const TargetData &TD){
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

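// Worked example (illustrative, not from the original source): for
//   %p = getelementptr [10 x i32]* %A, i32 0, i32 3
// GetOffsetFromIndex(%p, 1, ...) walks both indices and returns
// 0*40 + 3*4 = 12 bytes, assuming a target where i32 has an alloc size of
// 4 bytes.
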
/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset. For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40]; with 4-byte elements, Offset would be
/// -8, since Ptr2 = Ptr1 + (-8).
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const TargetData &TD) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && GEP2 == 0 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  if (GEP2 && GEP1 == 0 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, TD);
    return !VariableIdxFound;
  }

  // Right now we only handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base. After that base, they may have some number of common (and
  // potentially variable) indices, followed by constant indices which
  // determine their offset from each other. We handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4), which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range describing the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or 64 or more bytes, use memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (unsigned i = 0, e = TheStores.size(); i != e; ++i)
    if (!isa<StoreInst>(TheStores[i]))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored. If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

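// Worked example of the heuristic above (illustrative, not from the original
// source): on a 32-bit target (getPointerSize() == 4), a [0, 4) range built
// from four i8 stores gives NumPointerStores = 1 and NumByteStores = 0, so
// 4 > 1 and the memset is emitted. A [0, 8) range built from two i32 stores
// never reaches this point: the size-2 check above rejects it, matching the
// comment about 32-bit architectures.
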
namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges. We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  const TargetData &TD;
public:
  MemsetRanges(const TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = TD.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};
} // end anon namespace

/// addRange - Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
///
/// Do a linear search of the ranges to see if this can be joined and/or to
/// find the insertion point in the list. We keep the ranges sorted for
/// simplicity here. This is a linear search of a linked list, which is ugly,
/// however the number of ranges is limited, so this won't get crazy slow.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End, merging in any ranges that the extended I now overlaps.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

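// Illustrative trace (matching the MemsetRange example above, not from the
// original source): addRange(1,1) inserts [1,2); addRange(0,1) hits the
// Start < I->Start case and grows it to [0,2); addRange(3,1) is disjoint
// (3 > 2) and inserts a separate [3,4); addRange(2,1) overlaps [0,2), grows
// it to [0,3), and the merge loop then absorbs [3,4), yielding one [0,4)
// range.
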
//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    MemoryDependenceAnalysis *MD;
    TargetLibraryInfo *TLI;
    const TargetData *TD;
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(ID) {
      initializeMemCpyOptPass(*PassRegistry::getPassRegistry());
      MD = 0;
      TLI = 0;
      TD = 0;
    }

    bool runOnFunction(Function &F);

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addRequired<TargetLibraryInfo>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions.
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemSet(MemSetInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(Instruction *cpy, Value *cpyDst, Value *cpySrc,
                              uint64_t cpyLen, CallInst *C);
    bool processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                       uint64_t MSize);
    bool processByValArgument(CallSite CS, unsigned ArgNo);
    Instruction *tryMergingIntoMemset(Instruction *I, Value *StartPtr,
                                      Value *ByteVal);

    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

INITIALIZE_PASS_BEGIN(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTree)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfo)
INITIALIZE_AG_DEPENDENCY(AliasAnalysis)
INITIALIZE_PASS_END(MemCpyOpt, "memcpyopt", "MemCpy Optimization",
                    false, false)

/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away. In particular, this looks for stores to
/// neighboring locations of memory. If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
                                             Value *StartPtr, Value *ByteVal) {
  if (TD == 0) return 0;

  // Okay, so we now have a single store that can be splatable. Scan to find
  // all subsequent stores of the same value to offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  BasicBlock::iterator BI = StartInst;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (NextStore->isVolatile()) break;

      // Check to see if this stored value is the same byte-splattable value.
      if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                           Offset, *TD))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this memset is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return 0;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put them right before the first instruction
  // that isn't part of the memset block. This ensures that the memset is
  // dominated by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = 0;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      const Type *EltType =
        cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = TD->getABITypeAlignment(EltType);
    }

    AMemSet =
      Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    DEBUG(dbgs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            dbgs() << *Range.TheStores[i] << '\n';
          dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (SmallVector<Instruction*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI) {
      MD->removeInstruction(*SI);
      (*SI)->eraseFromParent();
    }
    ++NumMemSetInfer;
  }

  return AMemSet;
}

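// Illustrative IR sketch of the overall effect (not from the original source):
//
//   store i8 0, i8* %p
//   %p1 = getelementptr i8* %p, i64 1
//   store i8 0, i8* %p1
//   %p2 = getelementptr i8* %p, i64 2
//   store i8 0, i8* %p2
//   %p3 = getelementptr i8* %p, i64 3
//   store i8 0, i8* %p3
//
// becomes a single call:
//
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i32 1, i1 false)
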
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  if (TD == 0) return false;

  // Detect cases where we're performing call slot forwarding, but
  // happen to be using a load-store pair to implement it, rather than
  // a memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (!LI->isVolatile() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = 0;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
        AliasAnalysis::Location StoreLoc = AA.getLocation(SI);
        for (BasicBlock::iterator I = --BasicBlock::iterator(SI),
                                  E = C; I != E; --I) {
          if (AA.getModRefInfo(&*I, StoreLoc) != AliasAnalysis::NoModRef) {
            C = 0;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(LI,
                        SI->getPointerOperand()->stripPointerCasts(),
                        LI->getPointerOperand()->stripPointerCasts(),
                        TD->getTypeStoreSize(SI->getOperand(0)->getType()), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.
  //
  // Ensure that the value being stored is something that can be memset'd a
  // byte at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  if (Value *ByteVal = isBytewiseValue(SI->getOperand(0)))
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I;  // Don't invalidate iterator.
      return true;
    }

  return false;
}

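// Illustrative sketch of the load/store pair case above (not from the
// original source; @produce is a hypothetical sret callee):
//
//   %tmp = alloca %struct.S
//   call void @produce(%struct.S* sret %tmp)
//   %v = load %struct.S* %tmp
//   store %struct.S %v, %struct.S* %dst
//
// If nothing touches %dst between the call and the store, the call can write
// into %dst directly and the load/store pair (plus the alloca, eventually)
// goes away.
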
bool MemCpyOpt::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I;      // Don't invalidate iterator.
      return true;
    }
  return false;
}

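// For example (illustrative): a memset(%p, 0, 8) followed by a store of
// i32 0 to %p+8 can be widened into a single memset(%p, 0, 12) by the same
// range-merging machinery used for neighboring stores.
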
/// performCallSlotOptzn - takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(Instruction *cpy,
                                     Value *cpyDest, Value *cpySrc,
                                     uint64_t cpyLen, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  if (TD == 0) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check that it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.pop_back_val();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpyDest, srcSize) != AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

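// Concrete IR sketch of the transformation above (illustrative, with a
// hypothetical callee @compute):
//
//   %src = alloca %struct.S
//   call void @compute(%struct.S* %src)
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst8, i8* %src8, i64 N, ...)
//
// becomes, once all the checks pass:
//
//   call void @compute(%struct.S* %dst)
//
// with the memcpy removed and the alloca left for dead-code elimination.
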
/// processMemCpyMemCpyDependence - We've found that the (upward scanning)
/// memory dependence of memcpy 'M' is the memcpy 'MDep'. Try to simplify M to
/// copy from MDep's input if we can. MSize is the size of M's copy.
///
bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep,
                                              uint64_t MSize) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AA.getLocationForSource(MDep),
                                 false, M, M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  //
  // Make sure to use the lesser of the alignment of the source and the dest
  // since we're changing where we're reading from, but don't want to increase
  // the alignment past what can be read from or written to.
  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  unsigned Align = std::min(MDep->getAlignment(), M->getAlignment());

  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                          Align, M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), MDep->getRawSource(), M->getLength(),
                         Align, M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

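// In the notation used above, the successful rewrite is:
//    memcpy(b <- a)
//    memcpy(c <- b)      // M, whose dependence is MDep
// =>
//    memcpy(b <- a)
//    memcpy(c <- a)      // b's copy is now exposed as a candidate for DSE
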
/// processMemCpy - Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  // We can only optimize statically-sized memcpy's that are non-volatile.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (CopySize == 0 || M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, CopySize,
                             M->getAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult DepInfo = MD->getDependency(M);
  if (!DepInfo.isClobber())
    return false;

  if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst()))
    return processMemCpyMemCpyDependence(M, MDep, CopySize->getZExtValue());

  if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
    if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                             CopySize->getZExtValue(), C)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      return true;
    }
  }

  return false;
}

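// Example of the constant-global case (illustrative): given
//   @zeroes = constant [16 x i8] zeroinitializer
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %d, i8* ..., i64 16, ...)
// the initializer splats to the byte 0, so the copy is rewritten as
//   call void @llvm.memset.p0i8.i64(i8* %d, i8 0, i64 16, ...)
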
/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  if (!TLI->has(LibFunc::memmove))
    return false;

  // See if the source and dest pointers may alias; if so, we can't transform.
  if (!AA.isNoAlias(AA.getLocationForDest(M), AA.getLocationForSource(M)))
    return false;

  DEBUG(dbgs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // They don't alias, so we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *ArgTys[3] = { M->getRawDest()->getType(),
                            M->getRawSource()->getType(),
                            M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(Mod, Intrinsic::memcpy,
                                                 ArgTys, 3));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

/// processByValArgument - This is called on every byval argument in call sites.
bool MemCpyOpt::processByValArgument(CallSite CS, unsigned ArgNo) {
  if (TD == 0) return false;

  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  const Type *ByValTy =cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = TD->getTypeAllocSize(ByValTy);
  MemDepResult DepInfo =
    MD->getPointerDependencyFrom(AliasAnalysis::Location(ByValArg, ByValSize),
                                 true, CS.getInstruction(),
                                 CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (MDep == 0 || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger than or equal to the size of the
  // byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (C1 == 0 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo+1);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  if (MDep->getAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(),ByValAlign, TD) < ByValAlign)
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
    MD->getPointerDependencyFrom(AliasAnalysis::getLocationForSource(MDep),
                                 false, CS.getInstruction(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  DEBUG(dbgs() << "MemCpyOpt: Forwarding memcpy to byval:\n"
               << "  " << *MDep << "\n"
               << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  ++NumMemCpyInstr;
  return true;
}

/// iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (CallSite CS = (Value*)I) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.paramHasAttr(i+1, Attribute::ByVal))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB->begin()) --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  MD = &getAnalysis<MemoryDependenceAnalysis>();
  TD = getAnalysisIfAvailable<TargetData>();
  TLI = &getAnalysis<TargetLibraryInfo>();

  // If we don't have at least memset and memcpy, there is little point in doing
  // anything here. These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc::memset) || !TLI->has(LibFunc::memcpy))
    return false;

  // Iterate until no more transformations apply.
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = 0;
  return MadeChange;
}