//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "memcpyopt"
#include "llvm/Transforms/Scalar.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Instructions.h"
#include "llvm/LLVMContext.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/GetElementPtrTypeIterator.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetData.h"
#include <list>
using namespace llvm;

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");

/// isBytewiseValue - If the specified value can be set by repeating the same
/// byte in memory, return the i8 value that it is represented with.  This is
/// true for all i8 values obviously, but is also true for i32 0, i32 -1,
/// i16 0xF0F0, double 0.0 etc.  If the value can't be handled with a repeated
/// byte store (e.g. i16 0x1234), return null.
static Value *isBytewiseValue(Value *V, LLVMContext &Context) {
  // All byte-wide stores are splatable, even of arbitrary variables.
  if (V->getType() == Type::getInt8Ty(Context)) return V;

  // Constant float and double values can be handled as integer values if the
  // corresponding integer value is "byteable".  An important case is 0.0.
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(V)) {
    if (CFP->getType() == Type::getFloatTy(Context))
      V = ConstantExpr::getBitCast(CFP, Type::getInt32Ty(Context));
    if (CFP->getType() == Type::getDoubleTy(Context))
      V = ConstantExpr::getBitCast(CFP, Type::getInt64Ty(Context));
    // Don't handle long double formats, which have strange constraints.
  }

  // We can handle constant integers that are power of two in size and a
  // multiple of 8 bits.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    unsigned Width = CI->getBitWidth();
    if (isPowerOf2_32(Width) && Width > 8) {
      // We can handle this value if the recursive binary decomposition is the
      // same at all levels.
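      //
      // For example: i32 0xF0F0F0F0 splits into halves 0xF0F0/0xF0F0, which
      // match, then into 0xF0/0xF0, which also match, so we return i8 0xF0.
      // By contrast, i16 0x1234 splits into 0x12 and 0x34, which differ, so
      // we return null.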
      APInt Val = CI->getValue();
      APInt Val2;
      while (Val.getBitWidth() != 8) {
        unsigned NextWidth = Val.getBitWidth()/2;
        Val2  = Val.lshr(NextWidth);
        Val2.trunc(Val.getBitWidth()/2);
        Val.trunc(Val.getBitWidth()/2);

        // If the top/bottom halves aren't the same, reject it.
        if (Val != Val2)
          return 0;
      }
      return ConstantInt::get(Context, Val);
    }
  }

  // Conceptually, we could handle things like:
  //   %a = zext i8 %X to i16
  //   %b = shl i16 %a, 8
  //   %c = or i16 %a, %b
  // but until there is an example that actually needs this, it doesn't seem
  // worth worrying about.
  return 0;
}

/// GetOffsetFromIndex - Compute the constant byte offset implied by the GEP
/// indices starting at operand Idx, setting VariableIdxFound if any index is
/// not a constant.
static int64_t GetOffsetFromIndex(const GetElementPtrInst *GEP, unsigned Idx,
                                  bool &VariableIdxFound, TargetData &TD) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (OpC == 0)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (const StructType *STy = dyn_cast<StructType>(*GTI)) {
      Offset += TD.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector.  Multiply
    // the index by the ElementSize.
    uint64_t Size = TD.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size*OpC->getSExtValue();
  }

  return Offset;
}

/// IsPointerOffset - Return true if Ptr2 is provably equal to Ptr1 plus a
/// constant offset, and return that constant offset.  For example, Ptr1 might
/// be &A[42], and Ptr2 might be &A[40].  In this case the offset would be -8,
/// assuming 4-byte elements.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            TargetData &TD) {
  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an
  // identical base.  After that base, they may have some number of common (and
  // potentially variable) indices.  After that they may have some constant
  // offset, which determines their offset from each other.  At this point, we
  // handle no other case.
  GetElementPtrInst *GEP1 = dyn_cast<GetElementPtrInst>(Ptr1);
  GetElementPtrInst *GEP2 = dyn_cast<GetElementPtrInst>(Ptr2);
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  bool VariableIdxFound = false;
  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, TD);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, TD);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

/// MemsetRange - Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc.  When we see
/// the first store, we make a range [1, 2).  The second store extends the range
/// to [0, 2).  The third makes a new range [2, 3).  The fourth store joins the
/// two ranges into [0, 3) which is memset'able.
namespace {
struct MemsetRange {
  // Start/End - A half-open range that describes the span this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<StoreInst*, 16> TheStores;

  bool isProfitableToUseMemset(const TargetData &TD) const;
};
} // end anon namespace

bool MemsetRange::isProfitableToUseMemset(const TargetData &TD) const {
  // If we found 8 or more stores to merge, or a span of 64 bytes or more, use
  // memset.
  if (TheStores.size() >= 8 || End-Start >= 64) return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() <= 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the pointer size and assume that
  // this width can be stored.  If so, check to see whether we will end up
  // actually reducing the number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned NumPointerStores = Bytes/TD.getPointerSize();

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes - NumPointerStores*TD.getPointerSize();

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation.  This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

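// To illustrate the heuristic with concrete numbers (assuming a 4-byte
// pointer size): four i8 stores covering [0, 4) yield NumPointerStores = 1
// and NumByteStores = 0, and 4 > 1, so the range is lowered to memset.  Two
// i32 stores covering [0, 8) are rejected earlier by the size() <= 2 check,
// since the code generator can merge that pair by itself.
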
namespace {
class MemsetRanges {
  /// Ranges - A sorted list of the memset ranges.  We use std::list here
  /// because each element is relatively large and expensive to copy.
  std::list<MemsetRange> Ranges;
  typedef std::list<MemsetRange>::iterator range_iterator;
  TargetData &TD;
public:
  MemsetRanges(TargetData &td) : TD(td) {}

  typedef std::list<MemsetRange>::const_iterator const_iterator;
  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI);
};
} // end anon namespace

/// addStore - Add a new store to the MemsetRanges data structure.  This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addStore(int64_t Start, StoreInst *SI) {
  int64_t End = Start+TD.getTypeStoreSize(SI->getOperand(0)->getType());

  // Do a linear search of the ranges to see if this can be joined and/or to
  // find the insertion point in the list.  We keep the ranges sorted for
  // simplicity here.  This is a linear search of a linked list, which is ugly,
  // however the number of ranges is limited, so this won't get crazy slow.
  range_iterator I = Ranges.begin(), E = Ranges.end();

  while (I != E && Start > I->End)
    ++I;

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End.  If End < I->Start or I == E, then we need
  // to insert a new range.  Handle this now.
  if (I == E || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = SI->getPointerOperand();
    R.Alignment    = SI->getAlignment();
    R.TheStores.push_back(SI);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(SI);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if this store extends the start of the range.  In this case, it
  // couldn't possibly cause it to join the prior range, because otherwise we
  // would have stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = SI->getPointerOperand();
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start.  Extend I out to
  // End, merging in any subsequent ranges we now overlap.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != E && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOpt Pass
//===----------------------------------------------------------------------===//

namespace {
  class MemCpyOpt : public FunctionPass {
    bool runOnFunction(Function &F);
  public:
    static char ID; // Pass identification, replacement for typeid
    MemCpyOpt() : FunctionPass(&ID) {}

  private:
    // This transformation requires dominator info.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      AU.setPreservesCFG();
      AU.addRequired<DominatorTree>();
      AU.addRequired<MemoryDependenceAnalysis>();
      AU.addRequired<AliasAnalysis>();
      AU.addPreserved<AliasAnalysis>();
      AU.addPreserved<MemoryDependenceAnalysis>();
    }

    // Helper functions
    bool processStore(StoreInst *SI, BasicBlock::iterator &BBI);
    bool processMemCpy(MemCpyInst *M);
    bool processMemMove(MemMoveInst *M);
    bool performCallSlotOptzn(MemCpyInst *cpy, CallInst *C);
    bool iterateOnFunction(Function &F);
  };

  char MemCpyOpt::ID = 0;
}

// createMemCpyOptPass - The public interface to this file...
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOpt(); }

static RegisterPass<MemCpyOpt> X("memcpyopt",
                                 "MemCpy Optimization");

/// processStore - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones
/// (currently 4) it attempts to merge them together into a memcpy/memset.
bool MemCpyOpt::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (SI->isVolatile()) return false;

  LLVMContext &Context = SI->getContext();

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset.  Right now we only handle memset.

  // Ensure that the value being stored is something that can be memset a byte
  // at a time, like "0" or "-1" of any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  Value *ByteVal = isBytewiseValue(SI->getOperand(0), Context);
  if (!ByteVal)
    return false;

  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  Module *M = SI->getParent()->getParent()->getParent();

  // Okay, so we now have a single store with a splatable value.  Scan to find
  // all subsequent stores of the same value at offsets from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(*TD);

  Value *StartPtr = SI->getPointerOperand();

  BasicBlock::iterator BI = SI;
  for (++BI; !isa<TerminatorInst>(BI); ++BI) {
    if (isa<CallInst>(BI) || isa<InvokeInst>(BI)) {
      // If the call is readnone, ignore it, otherwise bail out.  We don't even
      // allow readonly here because we don't want something like:
      //   A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (AA.getModRefBehavior(CallSite::get(BI)) ==
            AliasAnalysis::DoesNotAccessMemory)
        continue;

      // TODO: If this is a memset, try to join it in.

      break;
    } else if (isa<VAArgInst>(BI) || isa<LoadInst>(BI))
      break;

    // If this is a non-store instruction it is fine, ignore it.
    StoreInst *NextStore = dyn_cast<StoreInst>(BI);
    if (NextStore == 0) continue;

    // If this is a store, see if we can merge it in.
    if (NextStore->isVolatile()) break;

    // Check to see if this stored value is of the same byte-splattable value.
    if (ByteVal != isBytewiseValue(NextStore->getOperand(0), Context))
      break;

    // Check to see if this store is to a constant offset from the start ptr.
    int64_t Offset;
    if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset, *TD))
      break;

    Ranges.addStore(Offset, NextStore);
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in.  This is a very common case of course.
  if (Ranges.empty())
    return false;

  // If we had at least one store that could be merged in, add the starting
  // store as well.  We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addStore(0, SI);

  Function *MemSetF = 0;

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  bool MadeChange = false;
  for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
       I != E; ++I) {
    const MemsetRange &Range = *I;

    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(*TD))
      continue;

    // Otherwise, we do want to transform this!  Create a new memset.  We put
    // the memset right before the first instruction that isn't part of this
    // memset block.  This ensures that the memset is dominated by any
    // addressing instruction needed by the start of the block.
    BasicBlock::iterator InsertPt = BI;

    if (MemSetF == 0) {
      const Type *Ty = Type::getInt64Ty(Context);
      MemSetF = Intrinsic::getDeclaration(M, Intrinsic::memset, &Ty, 1);
    }

    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Cast the start ptr to be i8* as memset requires.
    const Type *i8Ptr = PointerType::getUnqual(Type::getInt8Ty(Context));
    if (StartPtr->getType() != i8Ptr)
      StartPtr = new BitCastInst(StartPtr, i8Ptr, StartPtr->getName(),
                                 InsertPt);

    Value *Ops[] = {
      StartPtr, ByteVal,   // Start, value
      // size
      ConstantInt::get(Type::getInt64Ty(Context), Range.End-Range.Start),
      // align
      ConstantInt::get(Type::getInt32Ty(Context), Range.Alignment)
    };
    Value *C = CallInst::Create(MemSetF, Ops, Ops+4, "", InsertPt);
    DEBUG(errs() << "Replace stores:\n";
          for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
            errs() << *Range.TheStores[i];
          errs() << "With: " << *C);
    C = C;  // Suppress the unused-variable warning when DEBUG compiles away.

    // Don't invalidate the iterator.
    BBI = BI;

    // Zap all the stores.
    for (SmallVector<StoreInst*, 16>::const_iterator
         SI = Range.TheStores.begin(),
         SE = Range.TheStores.end(); SI != SE; ++SI)
      (*SI)->eraseFromParent();
    ++NumMemSetInfer;
    MadeChange = true;
  }

  return MadeChange;
}

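// Illustrative IR for the memset formation above (hypothetical value names):
//
//   store i8 0, i8* %p0           ; %p0 = &A[0]
//   store i8 0, i8* %p1           ; %p1 = &A[1]
//   store i8 0, i8* %p2           ; %p2 = &A[2]
//   store i8 0, i8* %p3           ; %p3 = &A[3]
//
// becomes, roughly:
//
//   call void @llvm.memset.i64(i8* %p0, i8 0, i64 4, i32 1)
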
/// performCallSlotOptzn - Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOpt::performCallSlotOptzn(MemCpyInst *cpy, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  Value *cpyDest = cpy->getDest();
  Value *cpySrc = cpy->getSource();
  CallSite CS = CallSite::get(C);

  // We need to be able to reason about the size of the memcpy, so we require
  // that it be a constant.
  ConstantInt *cpyLength = dyn_cast<ConstantInt>(cpy->getLength());
  if (!cpyLength)
    return false;

  // Require that src be an alloca.  This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  // Check that all of src is copied to dest.
  TargetData *TD = getAnalysisIfAvailable<TargetData>();
  if (!TD) return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  uint64_t srcSize = TD->getTypeAllocSize(srcAlloca->getAllocatedType()) *
    srcArraySize->getZExtValue();

  if (cpyLength->getZExtValue() < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap.  Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca.  Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = TD->getTypeAllocSize(A->getAllocatedType()) *
      destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // If the destination is an sret parameter then only accesses that are
    // outside of the returned struct type can trap.
    if (!A->hasStructRetAttr())
      return false;

    const Type *StructTy = cast<PointerType>(A->getType())->getElementType();
    uint64_t destSize = TD->getTypeAllocSize(StructTy);

    if (destSize < srcSize)
      return false;
  } else {
    return false;
  }

  // Check that src is not accessed except via the call and the memcpy.  This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->use_begin(),
                                   srcAlloca->use_end());
  while (!srcUseList.empty()) {
    User *UI = srcUseList.back();
    srcUseList.pop_back();

    if (isa<BitCastInst>(UI)) {
      for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
           I != E; ++I)
        srcUseList.push_back(*I);
    } else if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(UI)) {
      if (G->hasAllZeroIndices())
        for (User::use_iterator I = UI->use_begin(), E = UI->use_end();
             I != E; ++I)
          srcUseList.push_back(*I);
      else
        return false;
    } else if (UI != C && UI != cpy) {
      return false;
    }
  }

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = getAnalysis<DominatorTree>();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest.  We rely on AA to figure this out for us.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.getModRefInfo(C, cpy->getRawDest(), srcSize) !=
      AliasAnalysis::NoModRef)
    return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      if (cpySrc->getType() != cpyDest->getType())
        cpyDest = CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                              cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == cpyDest->getType())
        CS.setArgument(i, cpyDest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(cpyDest,
                          CS.getArgument(i)->getType(), cpyDest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();
  MD.removeInstruction(C);

  // Remove the memcpy.
  MD.removeInstruction(cpy);
  cpy->eraseFromParent();
  NumMemCpyInstr++;

  return true;
}

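// Illustrative IR for the call slot transform (hypothetical names and types):
//
//   %tmp = alloca %T
//   call void @foo(%T* sret %tmp)
//   ... memcpy of %tmp into %dst, sizeof(%T) bytes ...
//
// becomes, once the checks above succeed:
//
//   call void @foo(%T* sret %dst)
//
// with the now-dead memcpy erased.
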
/// processMemCpy - Perform simplification of memcpy's.  If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances).  This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOpt::processMemCpy(MemCpyInst *M) {
  MemoryDependenceAnalysis &MD = getAnalysis<MemoryDependenceAnalysis>();

  // There are two possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundancy for DSE.
  //   b) call-memcpy xform for return slot optimization.
  MemDepResult dep = MD.getDependency(M);
  if (!dep.isClobber())
    return false;
  if (!isa<MemCpyInst>(dep.getInst())) {
    if (CallInst *C = dyn_cast<CallInst>(dep.getInst()))
      return performCallSlotOptzn(M, C);
    return false;
  }

  MemCpyInst *MDep = cast<MemCpyInst>(dep.getInst());

  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *C2 = dyn_cast<ConstantInt>(M->getLength());
  if (!C1 || !C2)
    return false;

  uint64_t DepSize = C1->getValue().getZExtValue();
  uint64_t CpySize = C2->getValue().getZExtValue();

  if (DepSize < CpySize)
    return false;

  // Finally, we have to make sure that the dest of the second does not
  // alias the source of the first.
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();
  if (AA.alias(M->getRawDest(), CpySize, MDep->getRawSource(), DepSize) !=
      AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(M->getRawDest(), CpySize, M->getRawSource(), CpySize) !=
           AliasAnalysis::NoAlias)
    return false;
  else if (AA.alias(MDep->getRawDest(), DepSize, MDep->getRawSource(), DepSize)
           != AliasAnalysis::NoAlias)
    return false;

  // If all checks passed, then we can transform these memcpy's.
  const Type *Ty = M->getLength()->getType();
  Function *MemCpyFun = Intrinsic::getDeclaration(
                                 M->getParent()->getParent()->getParent(),
                                 M->getIntrinsicID(), &Ty, 1);

  Value *Args[4] = {
    M->getRawDest(), MDep->getRawSource(), M->getLength(), M->getAlignmentCst()
  };

  CallInst *C = CallInst::Create(MemCpyFun, Args, Args+4, "", M);

  // If C and M don't interfere, then this is a valid transformation.  If they
  // did, this would mean that the two sources overlap, which would be bad.
  if (MD.getDependency(C) == dep) {
    MD.removeInstruction(M);
    M->eraseFromParent();
    NumMemCpyInstr++;
    return true;
  }

  // Otherwise, there was no point in doing this, so we remove the call we
  // inserted and act like nothing happened.
  MD.removeInstruction(C);
  C->eraseFromParent();
  return false;
}

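// Illustrative IR for the memcpy-memcpy transform (hypothetical names):
//
//   call void @llvm.memcpy.i64(i8* %Y, i8* %X, i64 32, i32 4)
//   call void @llvm.memcpy.i64(i8* %Z, i8* %Y, i64 32, i32 4)
//
// The second call is rewritten to copy straight from %X to %Z, which lets a
// later DSE pass delete the first copy when %Y has no other uses.
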
/// processMemMove - Transforms memmove calls to memcpy calls when the src/dst
/// are guaranteed not to alias.
bool MemCpyOpt::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = getAnalysis<AliasAnalysis>();

  // If the memmove is a constant size, use it for the alias query; this allows
  // us to optimize things like: memmove(P, P+64, 64);
  uint64_t MemMoveSize = ~0ULL;
  if (ConstantInt *Len = dyn_cast<ConstantInt>(M->getLength()))
    MemMoveSize = Len->getZExtValue();

  // See if the pointers alias.
  if (AA.alias(M->getRawDest(), MemMoveSize, M->getRawSource(), MemMoveSize) !=
      AliasAnalysis::NoAlias)
    return false;

  DEBUG(errs() << "MemCpyOpt: Optimizing memmove -> memcpy: " << *M << "\n");

  // If not, then we know we can transform this.
  Module *Mod = M->getParent()->getParent()->getParent();
  const Type *Ty = M->getLength()->getType();
  M->setOperand(0, Intrinsic::getDeclaration(Mod, Intrinsic::memcpy, &Ty, 1));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  getAnalysis<MemoryDependenceAnalysis>().removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

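// Illustrative IR (hypothetical names, with %dst and %src proven NoAlias):
//
//   call void @llvm.memmove.i64(i8* %dst, i8* %src, i64 64, i32 1)
//
// is rewritten in place to:
//
//   call void @llvm.memcpy.i64(i8* %dst, i8* %src, i64 64, i32 1)
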
// MemCpyOpt::iterateOnFunction - Executes one iteration of MemCpyOpt.
bool MemCpyOpt::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  // Walk all instructions in the function.
  for (Function::iterator BB = F.begin(), BBE = F.end(); BB != BBE; ++BB) {
    for (BasicBlock::iterator BI = BB->begin(), BE = BB->end();
         BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = BI++;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        MadeChange |= processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I)) {
        if (processMemMove(M)) {
          --BI;  // Reprocess the new memcpy.
          MadeChange = true;
        }
      }
    }
  }

  return MadeChange;
}

// MemCpyOpt::runOnFunction - This is the main transformation entry point for a
// function.  It iterates until no more transformations apply.
//
bool MemCpyOpt::runOnFunction(Function &F) {
  bool MadeChange = false;
  while (1) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  return MadeChange;
}