1 //===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This pass performs various transformations related to eliminating memcpy
10 // calls, or transforming sets of stores into memset's.
12 //===----------------------------------------------------------------------===//
14 #include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
15 #include "llvm/ADT/DenseSet.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/ADT/STLExtras.h"
18 #include "llvm/ADT/SmallVector.h"
19 #include "llvm/ADT/Statistic.h"
20 #include "llvm/ADT/iterator_range.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/GlobalsModRef.h"
24 #include "llvm/Analysis/MemoryDependenceAnalysis.h"
25 #include "llvm/Analysis/MemoryLocation.h"
26 #include "llvm/Analysis/TargetLibraryInfo.h"
27 #include "llvm/Transforms/Utils/Local.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/BasicBlock.h"
31 #include "llvm/IR/CallSite.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/IR/DataLayout.h"
34 #include "llvm/IR/DerivedTypes.h"
35 #include "llvm/IR/Dominators.h"
36 #include "llvm/IR/Function.h"
37 #include "llvm/IR/GetElementPtrTypeIterator.h"
38 #include "llvm/IR/GlobalVariable.h"
39 #include "llvm/IR/IRBuilder.h"
40 #include "llvm/IR/InstrTypes.h"
41 #include "llvm/IR/Instruction.h"
42 #include "llvm/IR/Instructions.h"
43 #include "llvm/IR/IntrinsicInst.h"
44 #include "llvm/IR/Intrinsics.h"
45 #include "llvm/IR/LLVMContext.h"
46 #include "llvm/IR/Module.h"
47 #include "llvm/IR/Operator.h"
48 #include "llvm/IR/PassManager.h"
49 #include "llvm/IR/Type.h"
50 #include "llvm/IR/User.h"
51 #include "llvm/IR/Value.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/Debug.h"
55 #include "llvm/Support/MathExtras.h"
56 #include "llvm/Support/raw_ostream.h"
57 #include "llvm/Transforms/Scalar.h"
58 #include <algorithm>
59 #include <cassert>
60 #include <cstdint>
61 #include <utility>
63 using namespace llvm;
65 #define DEBUG_TYPE "memcpyopt"
67 STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
68 STATISTIC(NumMemSetInfer, "Number of memsets inferred");
69 STATISTIC(NumMoveToCpy, "Number of memmoves converted to memcpy");
70 STATISTIC(NumCpyToSet, "Number of memcpys converted to memset");
72 namespace {
74 /// Represents a range of memset'd bytes with the ByteVal value.
75 /// This allows us to analyze stores like:
76 /// store 0 -> P+1
77 /// store 0 -> P+0
78 /// store 0 -> P+3
79 /// store 0 -> P+2
80 /// which sometimes happens with stores to arrays of structs etc. When we see
81 /// the first store, we make a range [1, 2). The second store extends the range
82 /// to [0, 2). The third makes a new range [2, 3). The fourth store joins the
83 /// two ranges into [0, 3) which is memset'able.
84 struct MemsetRange {
85 // Start/End - A semi range that describes the span that this range covers.
86 // The range is closed at the start and open at the end: [Start, End).
87 int64_t Start, End;
89 /// StartPtr - The getelementptr instruction that points to the start of the
90 /// range.
91 Value *StartPtr;
93 /// Alignment - The known alignment of the first store.
94 unsigned Alignment;
96 /// TheStores - The actual stores that make up this range.
97 SmallVector<Instruction*, 16> TheStores;
99 bool isProfitableToUseMemset(const DataLayout &DL) const;
100 };
102 } // end anonymous namespace
104 bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
105 // If we found 4 or more stores to merge, or at least 16 bytes, use memset.
106 if (TheStores.size() >= 4 || End-Start >= 16) return true;
108 // If there is nothing to merge, don't do anything.
109 if (TheStores.size() < 2) return false;
111 // If any of the stores are a memset, then it is always good to extend the
112 // memset.
113 for (Instruction *SI : TheStores)
114 if (!isa<StoreInst>(SI))
115 return true;
117 // Assume that the code generator is capable of merging pairs of stores
118 // together if it wants to.
119 if (TheStores.size() == 2) return false;
121 // If we have fewer than 8 stores, it can still be worthwhile to do this.
122 // For example, merging 4 i8 stores into an i32 store is useful almost always.
123 // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
124 // memset will be split into 2 32-bit stores anyway) and doing so can
125 // pessimize the llvm optimizer.
127 // Since we don't have perfect knowledge here, make some assumptions: assume
128 // the maximum GPR width is the same size as the largest legal integer
129 // size. If so, check to see whether we will end up actually reducing the
130 // number of stores used.
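// Illustrative worked example (hypothetical numbers, assuming a target whose
// largest legal integer is 32 bits, so MaxIntSize = 4): three stores (one i16
// and two i8s) covering 4 contiguous bytes give NumPointerStores = 4/4 = 1 and
// NumByteStores = 4%4 = 0, and since 3 > 1 the memset is considered profitable.
// Three i32 stores covering 12 bytes give NumPointerStores = 3, and 3 > 3 is
// false, so they are left for the code generator to merge.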
131 unsigned Bytes = unsigned(End-Start);
132 unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
133 if (MaxIntSize == 0)
134 MaxIntSize = 1;
135 unsigned NumPointerStores = Bytes / MaxIntSize;
137 // Assume the remaining bytes, if any, are done a byte at a time.
138 unsigned NumByteStores = Bytes % MaxIntSize;
140 // If we will reduce the # stores (according to this heuristic), do the
141 // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
142 // etc.
143 return TheStores.size() > NumPointerStores+NumByteStores;
144 }
146 namespace {
148 class MemsetRanges {
149 using range_iterator = SmallVectorImpl<MemsetRange>::iterator;
151 /// A sorted list of the memset ranges.
152 SmallVector<MemsetRange, 8> Ranges;
154 const DataLayout &DL;
156 public:
157 MemsetRanges(const DataLayout &DL) : DL(DL) {}
159 using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;
161 const_iterator begin() const { return Ranges.begin(); }
162 const_iterator end() const { return Ranges.end(); }
163 bool empty() const { return Ranges.empty(); }
165 void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
166 if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
167 addStore(OffsetFromFirst, SI);
168 else
169 addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
170 }
172 void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
173 int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());
175 addRange(OffsetFromFirst, StoreSize,
176 SI->getPointerOperand(), SI->getAlignment(), SI);
177 }
179 void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
180 int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
181 addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
182 }
184 void addRange(int64_t Start, int64_t Size, Value *Ptr,
185 unsigned Alignment, Instruction *Inst);
186 };
188 } // end anonymous namespace
190 /// Add a new store to the MemsetRanges data structure. This adds a
191 /// new range for the specified store at the specified offset, merging into
192 /// existing ranges as appropriate.
193 void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
194 unsigned Alignment, Instruction *Inst) {
195 int64_t End = Start+Size;
197 range_iterator I = partition_point(
198 Ranges, [=](const MemsetRange &O) { return O.End < Start; });
200 // We now know that I == E, in which case we didn't find anything to merge
201 // with, or that Start <= I->End. If End < I->Start or I == E, then we need
202 // to insert a new range. Handle this now.
203 if (I == Ranges.end() || End < I->Start) {
204 MemsetRange &R = *Ranges.insert(I, MemsetRange());
205 R.Start = Start;
206 R.End = End;
207 R.StartPtr = Ptr;
208 R.Alignment = Alignment;
209 R.TheStores.push_back(Inst);
210 return;
211 }
213 // This store overlaps with I, add it.
214 I->TheStores.push_back(Inst);
216 // At this point, we may have an interval that completely contains our store.
217 // If so, just add it to the interval and return.
218 if (I->Start <= Start && I->End >= End)
219 return;
221 // Now we know that Start <= I->End and End >= I->Start so the range overlaps
222 // but is not entirely contained within the range.
224 // See if this store extends the start of the range. In this case, it couldn't
225 // possibly cause the range to join the prior range, because otherwise we
226 // would have stopped on *it*.
227 if (Start < I->Start) {
228 I->Start = Start;
229 I->StartPtr = Ptr;
230 I->Alignment = Alignment;
231 }
233 // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
234 // is in or right at the end of I), and that End >= I->Start. Extend I out to
235 // End.
236 if (End > I->End) {
237 I->End = End;
238 range_iterator NextI = I;
239 while (++NextI != Ranges.end() && End >= NextI->Start) {
240 // Merge the range in.
241 I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
242 if (NextI->End > I->End)
243 I->End = NextI->End;
244 Ranges.erase(NextI);
245 NextI = I;
246 }
247 }
248 }
250 //===----------------------------------------------------------------------===//
251 // MemCpyOptLegacyPass Pass
252 //===----------------------------------------------------------------------===//
254 namespace {
256 class MemCpyOptLegacyPass : public FunctionPass {
257 MemCpyOptPass Impl;
259 public:
260 static char ID; // Pass identification, replacement for typeid
262 MemCpyOptLegacyPass() : FunctionPass(ID) {
263 initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
264 }
266 bool runOnFunction(Function &F) override;
268 private:
269 // This transformation requires dominator info
270 void getAnalysisUsage(AnalysisUsage &AU) const override {
271 AU.setPreservesCFG();
272 AU.addRequired<AssumptionCacheTracker>();
273 AU.addRequired<DominatorTreeWrapperPass>();
274 AU.addRequired<MemoryDependenceWrapperPass>();
275 AU.addRequired<AAResultsWrapperPass>();
276 AU.addRequired<TargetLibraryInfoWrapperPass>();
277 AU.addPreserved<GlobalsAAWrapperPass>();
278 AU.addPreserved<MemoryDependenceWrapperPass>();
279 }
280 };
282 } // end anonymous namespace
284 char MemCpyOptLegacyPass::ID = 0;
286 /// The public interface to this file...
287 FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }
289 INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
290 false, false)
291 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
292 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
293 INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
294 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
295 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
296 INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
297 INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
298 false, false)
300 /// When scanning forward over instructions, we look for some other patterns to
301 /// fold away. In particular, this looks for stores to neighboring locations of
302 /// memory. If it sees enough consecutive ones, it attempts to merge them
303 /// together into a memcpy/memset.
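///
/// Illustrative sketch (hypothetical input, not from this file): four adjacent
/// byte stores of the same splat value,
///   A[0] = 0; A[1] = 0; A[2] = 0; A[3] = 0;
/// are collected into a single MemsetRange and, if profitable, replaced by
///   memset(A, 0, 4);
/// A neighboring memset of the same byte value is folded in the same way.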
304 Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
305 Value *StartPtr,
306 Value *ByteVal) {
307 const DataLayout &DL = StartInst->getModule()->getDataLayout();
309 // Okay, so we now have a single store whose stored value is splattable. Scan to find
310 // all subsequent stores of the same value to offsets from the same pointer.
311 // Join these together into ranges, so we can decide whether contiguous blocks
312 // are stored.
313 MemsetRanges Ranges(DL);
315 BasicBlock::iterator BI(StartInst);
316 for (++BI; !BI->isTerminator(); ++BI) {
317 if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
318 // If the instruction is readnone, ignore it, otherwise bail out. We
319 // don't even allow readonly here because we don't want something like:
320 // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
321 if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
322 break;
323 continue;
324 }
326 if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
327 // If this is a store, see if we can merge it in.
328 if (!NextStore->isSimple()) break;
330 // Check to see if this stored value is of the same byte-splattable value.
331 Value *StoredByte = isBytewiseValue(NextStore->getOperand(0), DL);
332 if (isa<UndefValue>(ByteVal) && StoredByte)
333 ByteVal = StoredByte;
334 if (ByteVal != StoredByte)
335 break;
337 // Check to see if this store is to a constant offset from the start ptr.
338 Optional<int64_t> Offset =
339 isPointerOffset(StartPtr, NextStore->getPointerOperand(), DL);
340 if (!Offset)
341 break;
343 Ranges.addStore(*Offset, NextStore);
344 } else {
345 MemSetInst *MSI = cast<MemSetInst>(BI);
347 if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
348 !isa<ConstantInt>(MSI->getLength()))
349 break;
351 // Check to see if this store is to a constant offset from the start ptr.
352 Optional<int64_t> Offset = isPointerOffset(StartPtr, MSI->getDest(), DL);
353 if (!Offset)
354 break;
356 Ranges.addMemSet(*Offset, MSI);
357 }
358 }
360 // If we have no ranges, then we just had a single store with nothing that
361 // could be merged in. This is a very common case of course.
362 if (Ranges.empty())
363 return nullptr;
365 // If we had at least one store that could be merged in, add the starting
366 // store as well. We try to avoid this unless there is at least something
367 // interesting as a small compile-time optimization.
368 Ranges.addInst(0, StartInst);
370 // If we create any memsets, we put it right before the first instruction that
371 // isn't part of the memset block. This ensures that the memset is dominated
372 // by any addressing instruction needed by the start of the block.
373 IRBuilder<> Builder(&*BI);
375 // Now that we have full information about ranges, loop over the ranges and
376 // emit memset's for anything big enough to be worthwhile.
377 Instruction *AMemSet = nullptr;
378 for (const MemsetRange &Range : Ranges) {
379 if (Range.TheStores.size() == 1) continue;
381 // If it is profitable to lower this range to memset, do so now.
382 if (!Range.isProfitableToUseMemset(DL))
383 continue;
385 // Otherwise, we do want to transform this! Create a new memset.
386 // Get the starting pointer of the block.
387 StartPtr = Range.StartPtr;
389 // Determine alignment
390 unsigned Alignment = Range.Alignment;
391 if (Alignment == 0) {
392 Type *EltType =
393 cast<PointerType>(StartPtr->getType())->getElementType();
394 Alignment = DL.getABITypeAlignment(EltType);
395 }
397 AMemSet =
398 Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);
400 LLVM_DEBUG(dbgs() << "Replace stores:\n"; for (Instruction *SI
401 : Range.TheStores) dbgs()
402 << *SI << '\n';
403 dbgs() << "With: " << *AMemSet << '\n');
405 if (!Range.TheStores.empty())
406 AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());
408 // Zap all the stores.
409 for (Instruction *SI : Range.TheStores) {
410 MD->removeInstruction(SI);
411 SI->eraseFromParent();
412 }
413 ++NumMemSetInfer;
414 }
416 return AMemSet;
417 }
419 static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
420 unsigned StoreAlign = SI->getAlignment();
421 if (!StoreAlign)
422 StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
423 return StoreAlign;
424 }
426 static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
427 unsigned LoadAlign = LI->getAlignment();
428 if (!LoadAlign)
429 LoadAlign = DL.getABITypeAlignment(LI->getType());
430 return LoadAlign;
431 }
433 static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
434 const LoadInst *LI) {
435 unsigned StoreAlign = findStoreAlignment(DL, SI);
436 unsigned LoadAlign = findLoadAlignment(DL, LI);
437 return MinAlign(StoreAlign, LoadAlign);
438 }
440 // This method tries to lift a store instruction before position P.
441 // It will lift the store and its argument, plus anything else that
442 // may alias with these.
443 // The method returns true if it was successful.
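// Illustrative sketch (hypothetical shapes, not from this file): to lift
//   p = &base[1]; *p = v;
// above some position P, the address computation feeding the store is lifted as
// well, and every intervening load/store/call that has to move with it is
// checked against P and against the source of LI, which is implicitly moved
// downwards past the lifted instructions.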
444 static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
445 const LoadInst *LI) {
446 // If the store aliases this position, bail out early.
447 MemoryLocation StoreLoc = MemoryLocation::get(SI);
448 if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
449 return false;
451 // Keep track of the arguments of all instruction we plan to lift
452 // so we can make sure to lift them as well if appropriate.
453 DenseSet<Instruction*> Args;
454 if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
455 if (Ptr->getParent() == SI->getParent())
456 Args.insert(Ptr);
458 // Instruction to lift before P.
459 SmallVector<Instruction*, 8> ToLift;
461 // Memory locations of lifted instructions.
462 SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};
464 // Lifted calls.
465 SmallVector<const CallBase *, 8> Calls;
467 const MemoryLocation LoadLoc = MemoryLocation::get(LI);
469 for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
470 auto *C = &*I;
472 bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));
474 bool NeedLift = false;
475 if (Args.erase(C))
476 NeedLift = true;
477 else if (MayAlias) {
478 NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
479 return isModOrRefSet(AA.getModRefInfo(C, ML));
480 });
482 if (!NeedLift)
483 NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
484 return isModOrRefSet(AA.getModRefInfo(C, Call));
485 });
486 }
488 if (!NeedLift)
489 continue;
491 if (MayAlias) {
492 // Since LI is implicitly moved downwards past the lifted instructions,
493 // none of them may modify its source.
494 if (isModSet(AA.getModRefInfo(C, LoadLoc)))
495 return false;
496 else if (const auto *Call = dyn_cast<CallBase>(C)) {
497 // If we can't lift this before P, it's game over.
498 if (isModOrRefSet(AA.getModRefInfo(P, Call)))
499 return false;
501 Calls.push_back(Call);
502 } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
503 // If we can't lift this before P, it's game over.
504 auto ML = MemoryLocation::get(C);
505 if (isModOrRefSet(AA.getModRefInfo(P, ML)))
506 return false;
508 MemLocs.push_back(ML);
509 } else
510 // We don't know how to lift this instruction.
511 return false;
512 }
514 ToLift.push_back(C);
515 for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
516 if (auto *A = dyn_cast<Instruction>(C->getOperand(k))) {
517 if (A->getParent() == SI->getParent()) {
518 // Cannot hoist user of P above P
519 if(A == P) return false;
520 Args.insert(A);
521 }
522 }
523 }
525 // We made it, we need to lift
526 for (auto *I : llvm::reverse(ToLift)) {
527 LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
528 I->moveBefore(P);
529 }
531 return true;
532 }
534 bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
535 if (!SI->isSimple()) return false;
537 // Avoid merging nontemporal stores since the resulting
538 // memcpy/memset would not be able to preserve the nontemporal hint.
539 // In theory we could teach how to propagate the !nontemporal metadata to
540 // memset calls. However, that change would force the backend to
541 // conservatively expand !nontemporal memset calls back to sequences of
542 // store instructions (effectively undoing the merging).
543 if (SI->getMetadata(LLVMContext::MD_nontemporal))
544 return false;
546 const DataLayout &DL = SI->getModule()->getDataLayout();
548 // Load to store forwarding can be interpreted as memcpy.
549 if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
550 if (LI->isSimple() && LI->hasOneUse() &&
551 LI->getParent() == SI->getParent()) {
553 auto *T = LI->getType();
554 if (T->isAggregateType()) {
555 AliasAnalysis &AA = LookupAliasAnalysis();
556 MemoryLocation LoadLoc = MemoryLocation::get(LI);
558 // We use alias analysis to check if an instruction may store to
559 // the memory we load from in between the load and the store. If
560 // such an instruction is found, we try to promote there instead
561 // of at the store position.
562 Instruction *P = SI;
563 for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
564 if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
565 P = &I;
566 break;
567 }
568 }
570 // We found an instruction that may write to the loaded memory.
571 // We can try to promote at this position instead of the store
572 // position if nothing aliases the store memory after this and the store
573 // destination is not in the range.
574 if (P && P != SI) {
575 if (!moveUp(AA, SI, P, LI))
576 P = nullptr;
577 }
579 // If a valid insertion position is found, then we can promote
580 // the load/store pair to a memcpy.
581 if (P) {
582 // If we load from memory that may alias the memory we store to,
583 // memmove must be used to preserve semantics. If not, memcpy can
584 // be used.
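// For example (illustrative, not from this file): for an aggregate copy such as
//   *(struct S *)dst = *(struct S *)src;
// dst and src may overlap unless AA proves otherwise, so memmove is the
// conservative lowering and memcpy is emitted only when the two locations are
// known not to alias.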
585 bool UseMemMove = false;
586 if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
587 UseMemMove = true;
589 uint64_t Size = DL.getTypeStoreSize(T);
591 IRBuilder<> Builder(P);
592 Instruction *M;
593 if (UseMemMove)
594 M = Builder.CreateMemMove(
595 SI->getPointerOperand(), findStoreAlignment(DL, SI),
596 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
597 else
598 M = Builder.CreateMemCpy(
599 SI->getPointerOperand(), findStoreAlignment(DL, SI),
600 LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
602 LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
603 << *M << "\n");
605 MD->removeInstruction(SI);
606 SI->eraseFromParent();
607 MD->removeInstruction(LI);
608 LI->eraseFromParent();
609 ++NumMemCpyInstr;
611 // Make sure we do not invalidate the iterator.
612 BBI = M->getIterator();
613 return true;
614 }
615 }
617 // Detect cases where we're performing call slot forwarding, but
618 // happen to be using a load-store pair to implement it, rather than
619 // a memcpy.
620 MemDepResult ldep = MD->getDependency(LI);
621 CallInst *C = nullptr;
622 if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
623 C = dyn_cast<CallInst>(ldep.getInst());
625 if (C) {
626 // Check that nothing touches the dest of the "copy" between
627 // the call and the store.
628 Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
629 bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
630 AliasAnalysis &AA = LookupAliasAnalysis();
631 MemoryLocation StoreLoc = MemoryLocation::get(SI);
632 for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
633 I != E; --I) {
634 if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
635 C = nullptr;
636 break;
637 }
638 // The store to dest may never happen if an exception can be thrown
639 // between the load and the store.
640 if (I->mayThrow() && !CpyDestIsLocal) {
641 C = nullptr;
642 break;
643 }
644 }
645 }
647 if (C) {
648 bool changed = performCallSlotOptzn(
649 LI, SI->getPointerOperand()->stripPointerCasts(),
650 LI->getPointerOperand()->stripPointerCasts(),
651 DL.getTypeStoreSize(SI->getOperand(0)->getType()),
652 findCommonAlignment(DL, SI, LI), C);
653 if (changed) {
654 MD->removeInstruction(SI);
655 SI->eraseFromParent();
656 MD->removeInstruction(LI);
657 LI->eraseFromParent();
658 ++NumMemCpyInstr;
659 return true;
660 }
661 }
662 }
663 }
665 // There are two cases that are interesting for this code to handle: memcpy
666 // and memset. Right now we only handle memset.
668 // Ensure that the value being stored is something that can be memset a byte
669 // at a time, like "0" or "-1" of any width, as well as things like
670 // 0xA0A0A0A0 and 0.0.
671 auto *V = SI->getOperand(0);
672 if (Value *ByteVal = isBytewiseValue(V, DL)) {
673 if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
674 ByteVal)) {
675 BBI = I->getIterator(); // Don't invalidate iterator.
676 return true;
677 }
679 // If we have an aggregate, we try to promote it to memset regardless
680 // of opportunity for merging as it can expose optimization opportunities
681 // in subsequent passes.
682 auto *T = V->getType();
683 if (T->isAggregateType()) {
684 uint64_t Size = DL.getTypeStoreSize(T);
685 unsigned Align = SI->getAlignment();
686 if (!Align)
687 Align = DL.getABITypeAlignment(T);
688 IRBuilder<> Builder(SI);
689 auto *M =
690 Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);
692 LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");
694 MD->removeInstruction(SI);
695 SI->eraseFromParent();
696 NumMemSetInfer++;
698 // Make sure we do not invalidate the iterator.
699 BBI = M->getIterator();
700 return true;
701 }
702 }
704 return false;
705 }
707 bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
708 // See if there is another memset or store neighboring this memset which
709 // allows us to widen out the memset to do a single larger store.
710 if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
711 if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
712 MSI->getValue())) {
713 BBI = I->getIterator(); // Don't invalidate iterator.
714 return true;
715 }
716 return false;
717 }
719 /// Takes a memcpy and a call that it depends on,
720 /// and checks for the possibility of a call slot optimization by having
721 /// the call write its result directly into the destination of the memcpy.
722 bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
723 Value *cpySrc, uint64_t cpyLen,
724 unsigned cpyAlign, CallInst *C) {
725 // The general transformation to keep in mind is
727 // call @func(..., src, ...)
728 // memcpy(dest, src, ...)
730 // ->
732 // memcpy(dest, src, ...)
733 // call @func(..., dest, ...)
735 // Since moving the memcpy is technically awkward, we additionally check that
736 // src only holds uninitialized values at the moment of the call, meaning that
737 // the memcpy can be discarded rather than moved.
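//
// Illustrative C-level shape (assumed, not from this file):
//   struct S tmp; f(&tmp); memcpy(dest, &tmp, sizeof tmp);
// becomes f(dest) with the memcpy removed, provided dest is large enough,
// sufficiently aligned, and not otherwise touched between the call and the copy.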
739 // Lifetime marks shouldn't be operated on.
740 if (Function *F = C->getCalledFunction())
741 if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
742 return false;
744 // Deliberately get the source and destination with bitcasts stripped away,
745 // because we'll need to do type comparisons based on the underlying type.
746 CallSite CS(C);
748 // Require that src be an alloca. This simplifies the reasoning considerably.
749 AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
750 if (!srcAlloca)
751 return false;
753 ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
754 if (!srcArraySize)
755 return false;
757 const DataLayout &DL = cpy->getModule()->getDataLayout();
758 uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
759 srcArraySize->getZExtValue();
761 if (cpyLen < srcSize)
762 return false;
764 // Check that accessing the first srcSize bytes of dest will not cause a
765 // trap. Otherwise the transform is invalid since it might cause a trap
766 // to occur earlier than it otherwise would.
767 if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
768 // The destination is an alloca. Check it is larger than srcSize.
769 ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
770 if (!destArraySize)
771 return false;
773 uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
774 destArraySize->getZExtValue();
776 if (destSize < srcSize)
777 return false;
778 } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
779 // The store to dest may never happen if the call can throw.
780 if (C->mayThrow())
781 return false;
783 if (A->getDereferenceableBytes() < srcSize) {
784 // If the destination is an sret parameter then only accesses that are
785 // outside of the returned struct type can trap.
786 if (!A->hasStructRetAttr())
787 return false;
789 Type *StructTy = cast<PointerType>(A->getType())->getElementType();
790 if (!StructTy->isSized()) {
791 // The call may never return and hence the copy-instruction may never
792 // be executed, and therefore it's not safe to say "the destination
793 // has at least <cpyLen> bytes, as implied by the copy-instruction",
794 return false;
795 }
797 uint64_t destSize = DL.getTypeAllocSize(StructTy);
798 if (destSize < srcSize)
799 return false;
800 }
801 } else {
802 return false;
803 }
805 // Check that dest points to memory that is at least as aligned as src.
806 unsigned srcAlign = srcAlloca->getAlignment();
807 if (!srcAlign)
808 srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
809 bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
810 // If dest is not aligned enough and we can't increase its alignment then
811 // bail out.
812 if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
813 return false;
815 // Check that src is not accessed except via the call and the memcpy. This
816 // guarantees that it holds only undefined values when passed in (so the final
817 // memcpy can be dropped), that it is not read or written between the call and
818 // the memcpy, and that writing beyond the end of it is undefined.
819 SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
820 srcAlloca->user_end());
821 while (!srcUseList.empty()) {
822 User *U = srcUseList.pop_back_val();
824 if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
825 for (User *UU : U->users())
826 srcUseList.push_back(UU);
827 continue;
828 }
829 if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
830 if (!G->hasAllZeroIndices())
831 return false;
833 for (User *UU : U->users())
834 srcUseList.push_back(UU);
835 continue;
836 }
837 if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
838 if (IT->isLifetimeStartOrEnd())
839 continue;
841 if (U != C && U != cpy)
842 return false;
843 }
845 // Check that src isn't captured by the called function since the
846 // transformation can cause aliasing issues in that case.
847 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
848 if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
849 return false;
851 // Since we're changing the parameter to the callsite, we need to make sure
852 // that what would be the new parameter dominates the callsite.
853 DominatorTree &DT = LookupDomTree();
854 if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
855 if (!DT.dominates(cpyDestInst, C))
856 return false;
858 // In addition to knowing that the call does not access src in some
859 // unexpected manner, for example via a global, which we deduce from
860 // the use analysis, we also need to know that it does not sneakily
861 // access dest. We rely on AA to figure this out for us.
862 AliasAnalysis &AA = LookupAliasAnalysis();
863 ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
864 // If necessary, perform additional analysis.
865 if (isModOrRefSet(MR))
866 MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
867 if (isModOrRefSet(MR))
868 return false;
870 // We can't create address space casts here because we don't know if they're
871 // safe for the target.
872 if (cpySrc->getType()->getPointerAddressSpace() !=
873 cpyDest->getType()->getPointerAddressSpace())
874 return false;
875 for (unsigned i = 0; i < CS.arg_size(); ++i)
876 if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
877 cpySrc->getType()->getPointerAddressSpace() !=
878 CS.getArgument(i)->getType()->getPointerAddressSpace())
879 return false;
881 // All the checks have passed, so do the transformation.
882 bool changedArgument = false;
883 for (unsigned i = 0; i < CS.arg_size(); ++i)
884 if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
885 Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
886 : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
887 cpyDest->getName(), C);
888 changedArgument = true;
889 if (CS.getArgument(i)->getType() == Dest->getType())
890 CS.setArgument(i, Dest);
891 else
892 CS.setArgument(i, CastInst::CreatePointerCast(Dest,
893 CS.getArgument(i)->getType(), Dest->getName(), C));
894 }
896 if (!changedArgument)
897 return false;
899 // If the destination wasn't sufficiently aligned then increase its alignment.
900 if (!isDestSufficientlyAligned) {
901 assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
902 cast<AllocaInst>(cpyDest)->setAlignment(MaybeAlign(srcAlign));
903 }
905 // Drop any cached information about the call, because we may have changed
906 // its dependence information by changing its parameter.
907 MD->removeInstruction(C);
909 // Update AA metadata
910 // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
911 // handled here, but combineMetadata doesn't support them yet
912 unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
913 LLVMContext::MD_noalias,
914 LLVMContext::MD_invariant_group,
915 LLVMContext::MD_access_group};
916 combineMetadata(C, cpy, KnownIDs, true);
918 // Remove the memcpy.
919 MD->removeInstruction(cpy);
920 ++NumMemCpyInstr;
922 return true;
923 }
925 /// We've found that the (upward scanning) memory dependence of memcpy 'M' is
926 /// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
927 bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
928 MemCpyInst *MDep) {
929 // We can only transforms memcpy's where the dest of one is the source of the
930 // other.
931 if (M->getSource() != MDep->getDest() || MDep->isVolatile())
932 return false;
934 // If the dep instruction is reading from our current input, then it is a noop
935 // transfer and substituting the input won't change this instruction. Just
936 // ignore the input and let someone else zap MDep. This handles cases like:
937 // memcpy(a <- a)
938 // memcpy(b <- a)
939 if (M->getSource() == MDep->getSource())
940 return false;
942 // Second, the length of the memcpy's must be the same, or the preceding one
943 // must be larger than the following one.
944 ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
945 ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
946 if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
947 return false;
949 AliasAnalysis &AA = LookupAliasAnalysis();
951 // Verify that the copied-from memory doesn't change in between the two
952 // transfers. For example, in:
953 // memcpy(a <- b)
954 // *b = 42;
955 // memcpy(c <- a)
956 // It would be invalid to transform the second memcpy into memcpy(c <- b).
958 // TODO: If the code between M and MDep is transparent to the destination "c",
959 // then we could still perform the xform by moving M up to the first memcpy.
961 // NOTE: This is conservative, it will stop on any read from the source loc,
962 // not just the defining memcpy.
963 MemDepResult SourceDep =
964 MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
965 M->getIterator(), M->getParent());
966 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
967 return false;
969 // If the dest of the second might alias the source of the first, then the
970 // source and dest might overlap. We still want to eliminate the intermediate
971 // value, but we have to generate a memmove instead of memcpy.
972 bool UseMemMove = false;
973 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
974 MemoryLocation::getForSource(MDep)))
975 UseMemMove = true;
977 // If all checks passed, then we can transform M.
978 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
979 << *MDep << '\n' << *M << '\n');
981 // TODO: Is this worth it if we're creating a less aligned memcpy? For
982 // example we could be moving from movaps -> movq on x86.
983 IRBuilder<> Builder(M);
984 if (UseMemMove)
985 Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
986 MDep->getRawSource(), MDep->getSourceAlignment(),
987 M->getLength(), M->isVolatile());
988 else
989 Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
990 MDep->getRawSource(), MDep->getSourceAlignment(),
991 M->getLength(), M->isVolatile());
993 // Remove the instruction we're replacing.
994 MD->removeInstruction(M);
995 M->eraseFromParent();
996 ++NumMemCpyInstr;
997 return true;
998 }
1000 /// We've found that the (upward scanning) memory dependence of \p MemCpy is
1001 /// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
1002 /// weren't copied over by \p MemCpy.
1004 /// In other words, transform:
1005 /// \code
1006 /// memset(dst, c, dst_size);
1007 /// memcpy(dst, src, src_size);
1008 /// \endcode
1009 /// into:
1010 /// \code
1011 /// memcpy(dst, src, src_size);
1012 /// memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
1013 /// \endcode
1014 bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
1015 MemSetInst *MemSet) {
1016 // We can only transform memset/memcpy with the same destination.
1017 if (MemSet->getDest() != MemCpy->getDest())
1018 return false;
1020 // Check that there are no other dependencies on the memset destination.
1021 MemDepResult DstDepInfo =
1022 MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
1023 MemCpy->getIterator(), MemCpy->getParent());
1024 if (DstDepInfo.getInst() != MemSet)
1025 return false;
1027 // Use the same i8* dest as the memcpy, killing the memset dest if different.
1028 Value *Dest = MemCpy->getRawDest();
1029 Value *DestSize = MemSet->getLength();
1030 Value *SrcSize = MemCpy->getLength();
1032 // By default, create an unaligned memset.
1033 unsigned Align = 1;
1034 // If Dest is aligned, and SrcSize is constant, use the minimum alignment
1035 // of the sum.
1036 const unsigned DestAlign =
1037 std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
1038 if (DestAlign > 1)
1039 if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
1040 Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);
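// Worked example (illustrative numbers): with DestAlign = 8 and a constant
// SrcSize of 4, MinAlign(4, 8) = 4, so the trailing memset at Dest + SrcSize
// is emitted with alignment 4.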
1042 IRBuilder<> Builder(MemCpy);
1044 // If the sizes have different types, zext the smaller one.
1045 if (DestSize->getType() != SrcSize->getType()) {
1046 if (DestSize->getType()->getIntegerBitWidth() >
1047 SrcSize->getType()->getIntegerBitWidth())
1048 SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
1049 else
1050 DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
1051 }
1053 Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
1054 Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
1055 Value *MemsetLen = Builder.CreateSelect(
1056 Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
1057 Builder.CreateMemSet(
1058 Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
1059 SrcSize),
1060 MemSet->getOperand(1), MemsetLen, Align);
1062 MD->removeInstruction(MemSet);
1063 MemSet->eraseFromParent();
1064 return true;
1065 }
1067 /// Determine whether the instruction has undefined content for the given Size,
1068 /// either because it was freshly alloca'd or started its lifetime.
1069 static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
1070 if (isa<AllocaInst>(I))
1071 return true;
1073 if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
1074 if (II->getIntrinsicID() == Intrinsic::lifetime_start)
1075 if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
1076 if (LTSize->getZExtValue() >= Size->getZExtValue())
1077 return true;
1079 return false;
1080 }
1082 /// Transform memcpy to memset when its source was just memset.
1083 /// In other words, turn:
1084 /// \code
1085 /// memset(dst1, c, dst1_size);
1086 /// memcpy(dst2, dst1, dst2_size);
1087 /// \endcode
1088 /// into:
1089 /// \code
1090 /// memset(dst1, c, dst1_size);
1091 /// memset(dst2, c, dst2_size);
1092 /// \endcode
1093 /// When dst2_size <= dst1_size.
1095 /// The \p MemCpy must have a Constant length.
1096 bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
1097 MemSetInst *MemSet) {
1098 AliasAnalysis &AA = LookupAliasAnalysis();
1100 // Make sure we are memsetting and memcpying from the same address, i.e. that
1101 // this is memcpy(..., memset(...), ...). Otherwise it is hard to reason about.
1102 if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
1103 return false;
1105 // A known memset size is required.
1106 ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
1107 if (!MemSetSize)
1108 return false;
1110 // Make sure the memcpy doesn't read any more than what the memset wrote.
1111 // Don't worry about sizes larger than i64.
1112 ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
1113 if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
1114 // If the memcpy is larger than the memset, but the memory was undef prior
1115 // to the memset, we can just ignore the tail. Technically we're only
1116 // interested in the bytes from MemSetSize..CopySize here, but as we can't
1117 // easily represent this location, we use the full 0..CopySize range.
1118 MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
1119 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1120 MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
1121 if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
1122 CopySize = MemSetSize;
1123 else
1124 return false;
1125 }
1127 IRBuilder<> Builder(MemCpy);
1128 Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
1129 CopySize, MemCpy->getDestAlignment());
1130 return true;
1131 }
1133 /// Perform simplification of memcpy's. If we have memcpy A
1134 /// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
1135 /// B to be a memcpy from X to Z (or potentially a memmove, depending on
1136 /// circumstances). This allows later passes to remove the first memcpy
1137 /// altogether.
1138 bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
1139 // We can only optimize non-volatile memcpy's.
1140 if (M->isVolatile()) return false;
1142 // If the source and destination of the memcpy are the same, then zap it.
1143 if (M->getSource() == M->getDest()) {
1144 MD->removeInstruction(M);
1145 M->eraseFromParent();
1146 return false;
1147 }
1149 // If copying from a constant, try to turn the memcpy into a memset.
1150 if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
1151 if (GV->isConstant() && GV->hasDefinitiveInitializer())
1152 if (Value *ByteVal = isBytewiseValue(GV->getInitializer(),
1153 M->getModule()->getDataLayout())) {
1154 IRBuilder<> Builder(M);
1155 Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
1156 M->getDestAlignment(), false);
1157 MD->removeInstruction(M);
1158 M->eraseFromParent();
1159 ++NumCpyToSet;
1160 return true;
1161 }
1163 MemDepResult DepInfo = MD->getDependency(M);
1165 // Try to turn a partially redundant memset + memcpy into
1166 // memcpy + smaller memset. We don't need the memcpy size for this.
1167 if (DepInfo.isClobber())
1168 if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
1169 if (processMemSetMemCpyDependence(M, MDep))
1170 return true;
1172 // The optimizations after this point require the memcpy size.
1173 ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
1174 if (!CopySize) return false;
1176 // There are four possible optimizations we can do for memcpy:
1177 // a) memcpy-memcpy xform which exposes redundance for DSE.
1178 // b) call-memcpy xform for return slot optimization.
1179 // c) memcpy from freshly alloca'd space or space that has just started its
1180 // lifetime copies undefined data, and we can therefore eliminate the
1181 // memcpy in favor of the data that was already at the destination.
1182 // d) memcpy from a just-memset'd source can be turned into memset.
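// As an illustration of case (c) above (hypothetical shape, not from this file):
//   char tmp[16];            // freshly alloca'd, never written
//   memcpy(dst <- tmp, 16)
// copies undefined bytes, so the memcpy can simply be removed.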
1183 if (DepInfo.isClobber()) {
1184 if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
1185 // FIXME: Can we pass in either of dest/src alignment here instead
1186 // of conservatively taking the minimum?
1187 unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
1188 if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
1189 CopySize->getZExtValue(), Align,
1190 C)) {
1191 MD->removeInstruction(M);
1192 M->eraseFromParent();
1193 return true;
1194 }
1195 }
1196 }
1198 MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
1199 MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
1200 SrcLoc, true, M->getIterator(), M->getParent());
1202 if (SrcDepInfo.isClobber()) {
1203 if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
1204 return processMemCpyMemCpyDependence(M, MDep);
1205 } else if (SrcDepInfo.isDef()) {
1206 if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
1207 MD->removeInstruction(M);
1208 M->eraseFromParent();
1209 ++NumMemCpyInstr;
1210 return true;
1211 }
1212 }
1214 if (SrcDepInfo.isClobber())
1215 if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
1216 if (performMemCpyToMemSetOptzn(M, MDep)) {
1217 MD->removeInstruction(M);
1218 M->eraseFromParent();
1219 ++NumCpyToSet;
1220 return true;
1221 }
1223 return false;
1224 }
1226 /// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
1227 /// not to alias.
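///
/// For example (illustrative): memmove(a <- b) where a and b are distinct,
/// non-overlapping objects is rewritten in place into the memcpy intrinsic,
/// which later passes and the backend can handle more aggressively.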
1228 bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
1229 AliasAnalysis &AA = LookupAliasAnalysis();
1231 if (!TLI->has(LibFunc_memmove))
1232 return false;
1234 // See if the pointers alias.
1235 if (!AA.isNoAlias(MemoryLocation::getForDest(M),
1236 MemoryLocation::getForSource(M)))
1237 return false;
1239 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
1240 << "\n");
1242 // If not, then we know we can transform this.
1243 Type *ArgTys[3] = { M->getRawDest()->getType(),
1244 M->getRawSource()->getType(),
1245 M->getLength()->getType() };
1246 M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
1247 Intrinsic::memcpy, ArgTys));
1249 // MemDep may have overly conservative information about this instruction; just
1250 // conservatively flush it from the cache.
1251 MD->removeInstruction(M);
1253 ++NumMoveToCpy;
1254 return true;
1255 }
1257 /// This is called on every byval argument in call sites.
1258 bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
1259 const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
1260 // Find out what feeds this byval argument.
1261 Value *ByValArg = CS.getArgument(ArgNo);
1262 Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
1263 uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
1264 MemDepResult DepInfo = MD->getPointerDependencyFrom(
1265 MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
1266 CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
1267 if (!DepInfo.isClobber())
1268 return false;
1270 // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
1271 // a memcpy, see if we can byval from the source of the memcpy instead of the
1272 // result.
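// Illustrative shape (assumed, not from this file):
//   memcpy(tmp <- src, n)
//   call @f(byval tmp)
// can become call @f(byval src) when n covers the byval size, alignment and
// address space match, and src is not modified between the copy and the call.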
1273 MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
1274 if (!MDep || MDep->isVolatile() ||
1275 ByValArg->stripPointerCasts() != MDep->getDest())
1276 return false;
1278 // The length of the memcpy must be larger or equal to the size of the byval.
1279 ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
1280 if (!C1 || C1->getValue().getZExtValue() < ByValSize)
1281 return false;
1283 // Get the alignment of the byval. If the call doesn't specify the alignment,
1284 // then it is some target specific value that we can't know.
1285 unsigned ByValAlign = CS.getParamAlignment(ArgNo);
1286 if (ByValAlign == 0) return false;
1288 // If it is greater than the memcpy, then we check to see if we can force the
1289 // source of the memcpy to the alignment we need. If we fail, we bail out.
1290 AssumptionCache &AC = LookupAssumptionCache();
1291 DominatorTree &DT = LookupDomTree();
1292 if (MDep->getSourceAlignment() < ByValAlign &&
1293 getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
1294 CS.getInstruction(), &AC, &DT) < ByValAlign)
1295 return false;
1297 // The address space of the memcpy source must match the byval argument
1298 if (MDep->getSource()->getType()->getPointerAddressSpace() !=
1299 ByValArg->getType()->getPointerAddressSpace())
1300 return false;
1302 // Verify that the copied-from memory doesn't change in between the memcpy and
1303 // the byval call.
1304 // memcpy(a <- b)
1305 // *b = 42;
1306 // foo(*a)
1307 // It would be invalid to transform the second memcpy into foo(*b).
1309 // NOTE: This is conservative, it will stop on any read from the source loc,
1310 // not just the defining memcpy.
1311 MemDepResult SourceDep = MD->getPointerDependencyFrom(
1312 MemoryLocation::getForSource(MDep), false,
1313 CS.getInstruction()->getIterator(), MDep->getParent());
1314 if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
1315 return false;
1317 Value *TmpCast = MDep->getSource();
1318 if (MDep->getSource()->getType() != ByValArg->getType())
1319 TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
1320 "tmpcast", CS.getInstruction());
1322 LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
1323 << " " << *MDep << "\n"
1324 << " " << *CS.getInstruction() << "\n");
1326 // Otherwise we're good! Update the byval argument.
1327 CS.setArgument(ArgNo, TmpCast);
1328 ++NumMemCpyInstr;
1329 return true;
1330 }
1332 /// Executes one iteration of MemCpyOptPass.
1333 bool MemCpyOptPass::iterateOnFunction(Function &F) {
1334 bool MadeChange = false;
1336 DominatorTree &DT = LookupDomTree();
1338 // Walk all instructions in the function.
1339 for (BasicBlock &BB : F) {
1340 // Skip unreachable blocks. For example processStore assumes that an
1341 // instruction in a BB can't be dominated by a later instruction in the
1342 // same BB (which is a scenario that can happen for an unreachable BB that
1343 // has itself as a predecessor).
1344 if (!DT.isReachableFromEntry(&BB))
1345 continue;
1347 for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
1348 // Avoid invalidating the iterator.
1349 Instruction *I = &*BI++;
1351 bool RepeatInstruction = false;
1353 if (StoreInst *SI = dyn_cast<StoreInst>(I))
1354 MadeChange |= processStore(SI, BI);
1355 else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
1356 RepeatInstruction = processMemSet(M, BI);
1357 else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
1358 RepeatInstruction = processMemCpy(M);
1359 else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
1360 RepeatInstruction = processMemMove(M);
1361 else if (auto CS = CallSite(I)) {
1362 for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
1363 if (CS.isByValArgument(i))
1364 MadeChange |= processByValArgument(CS, i);
1365 }
1367 // Reprocess the instruction if desired.
1368 if (RepeatInstruction) {
1369 if (BI != BB.begin())
1370 --BI;
1371 MadeChange = true;
1372 }
1373 }
1374 }
1376 return MadeChange;
1377 }
1379 PreservedAnalyses MemCpyOptPass::run(Function &F, FunctionAnalysisManager &AM) {
1380 auto &MD = AM.getResult<MemoryDependenceAnalysis>(F);
1381 auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
1383 auto LookupAliasAnalysis = [&]() -> AliasAnalysis & {
1384 return AM.getResult<AAManager>(F);
1385 };
1386 auto LookupAssumptionCache = [&]() -> AssumptionCache & {
1387 return AM.getResult<AssumptionAnalysis>(F);
1388 };
1389 auto LookupDomTree = [&]() -> DominatorTree & {
1390 return AM.getResult<DominatorTreeAnalysis>(F);
1391 };
1393 bool MadeChange = runImpl(F, &MD, &TLI, LookupAliasAnalysis,
1394 LookupAssumptionCache, LookupDomTree);
1395 if (!MadeChange)
1396 return PreservedAnalyses::all();
1398 PreservedAnalyses PA;
1399 PA.preserveSet<CFGAnalyses>();
1400 PA.preserve<GlobalsAA>();
1401 PA.preserve<MemoryDependenceAnalysis>();
1402 return PA;
1403 }
1405 bool MemCpyOptPass::runImpl(
1406 Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
1407 std::function<AliasAnalysis &()> LookupAliasAnalysis_,
1408 std::function<AssumptionCache &()> LookupAssumptionCache_,
1409 std::function<DominatorTree &()> LookupDomTree_) {
1410 bool MadeChange = false;
1411 MD = MD_;
1412 TLI = TLI_;
1413 LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
1414 LookupAssumptionCache = std::move(LookupAssumptionCache_);
1415 LookupDomTree = std::move(LookupDomTree_);
1417 // If we don't have at least memset and memcpy, there is little point in doing
1418 // anything here. These are required by a freestanding implementation, so if
1419 // even they are disabled, there is no point in trying hard.
1420 if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
1421 return false;
1423 while (true) {
1424 if (!iterateOnFunction(F))
1425 break;
1426 MadeChange = true;
1427 }
1429 MD = nullptr;
1430 return MadeChange;
1431 }
1433 /// This is the main transformation entry point for a function.
1434 bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
1435 if (skipFunction(F))
1436 return false;
1438 auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
1439 auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1441 auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
1442 return getAnalysis<AAResultsWrapperPass>().getAAResults();
1443 };
1444 auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
1445 return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
1446 };
1447 auto LookupDomTree = [this]() -> DominatorTree & {
1448 return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
1449 };
1451 return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
1452 LookupDomTree);
1453 }