//===- MemCpyOptimizer.cpp - Optimize use of memcpy and friends -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass performs various transformations related to eliminating memcpy
// calls, or transforming sets of stores into memset's.
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/MemCpyOptimizer.h"
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/MemoryDependenceAnalysis.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "memcpyopt"

STATISTIC(NumMemCpyInstr, "Number of memcpy instructions deleted");
STATISTIC(NumMemSetInfer, "Number of memsets inferred");
STATISTIC(NumMoveToCpy,   "Number of memmoves converted to memcpy");
STATISTIC(NumCpyToSet,    "Number of memcpys converted to memset");

static int64_t GetOffsetFromIndex(const GEPOperator *GEP, unsigned Idx,
                                  bool &VariableIdxFound,
                                  const DataLayout &DL) {
  // Skip over the first indices.
  gep_type_iterator GTI = gep_type_begin(GEP);
  for (unsigned i = 1; i != Idx; ++i, ++GTI)
    /*skip along*/;

  // Compute the offset implied by the rest of the indices.
  int64_t Offset = 0;
  for (unsigned i = Idx, e = GEP->getNumOperands(); i != e; ++i, ++GTI) {
    ConstantInt *OpC = dyn_cast<ConstantInt>(GEP->getOperand(i));
    if (!OpC)
      return VariableIdxFound = true;
    if (OpC->isZero()) continue;  // No offset.

    // Handle struct indices, which add their field offset to the pointer.
    if (StructType *STy = GTI.getStructTypeOrNull()) {
      Offset += DL.getStructLayout(STy)->getElementOffset(OpC->getZExtValue());
      continue;
    }

    // Otherwise, we have a sequential type like an array or vector. Multiply
    // the index by the ElementSize.
    uint64_t Size = DL.getTypeAllocSize(GTI.getIndexedType());
    Offset += Size * OpC->getSExtValue();
  }

  return Offset;
}

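// Worked example for the helper above (illustrative IR, not from the original
// source): for %g = getelementptr {i32, i32}, {i32, i32}* %p, i64 0, i32 1
// with Idx = 1 and 4-byte i32s, the i64 0 index contributes nothing and the
// struct field index contributes the field offset 4, so the result is 4.
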
/// Return true if Ptr1 is provably equal to Ptr2 plus a constant offset, and
/// return that constant offset. For example, Ptr1 might be &A[42], and Ptr2
/// might be &A[40]. In this case offset would be -8.
static bool IsPointerOffset(Value *Ptr1, Value *Ptr2, int64_t &Offset,
                            const DataLayout &DL) {
  Ptr1 = Ptr1->stripPointerCasts();
  Ptr2 = Ptr2->stripPointerCasts();

  // Handle the trivial case first.
  if (Ptr1 == Ptr2) {
    Offset = 0;
    return true;
  }

  GEPOperator *GEP1 = dyn_cast<GEPOperator>(Ptr1);
  GEPOperator *GEP2 = dyn_cast<GEPOperator>(Ptr2);

  bool VariableIdxFound = false;

  // If one pointer is a GEP and the other isn't, then see if the GEP is a
  // constant offset from the base, as in "P" and "gep P, 1".
  if (GEP1 && !GEP2 && GEP1->getOperand(0)->stripPointerCasts() == Ptr2) {
    Offset = -GetOffsetFromIndex(GEP1, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  if (GEP2 && !GEP1 && GEP2->getOperand(0)->stripPointerCasts() == Ptr1) {
    Offset = GetOffsetFromIndex(GEP2, 1, VariableIdxFound, DL);
    return !VariableIdxFound;
  }

  // Right now we handle the case when Ptr1/Ptr2 are both GEPs with an identical
  // base. After that base, they may have some number of common (and
  // potentially variable) indices. After that they handle some constant
  // offset, which determines their offset from each other. At this point, we
  // handle no other case.
  if (!GEP1 || !GEP2 || GEP1->getOperand(0) != GEP2->getOperand(0))
    return false;

  // Skip any common indices and track the GEP types.
  unsigned Idx = 1;
  for (; Idx != GEP1->getNumOperands() && Idx != GEP2->getNumOperands(); ++Idx)
    if (GEP1->getOperand(Idx) != GEP2->getOperand(Idx))
      break;

  int64_t Offset1 = GetOffsetFromIndex(GEP1, Idx, VariableIdxFound, DL);
  int64_t Offset2 = GetOffsetFromIndex(GEP2, Idx, VariableIdxFound, DL);
  if (VariableIdxFound) return false;

  Offset = Offset2-Offset1;
  return true;
}

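// Worked example (illustrative IR, names invented): with
//   %p1 = getelementptr i32, i32* %p, i64 1
//   %p3 = getelementptr i32, i32* %p, i64 3
// IsPointerOffset(%p1, %p3, Offset, DL) finds the common base %p, computes
// byte offsets 4 and 12 from the differing indices, and sets Offset to 8:
// %p3 is two i32s (8 bytes) past %p1.
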
namespace {

/// Represents a range of memset'd bytes with the ByteVal value.
/// This allows us to analyze stores like:
///   store 0 -> P+1
///   store 0 -> P+0
///   store 0 -> P+3
///   store 0 -> P+2
/// which sometimes happens with stores to arrays of structs etc. When we see
/// the first store, we make a range [1, 2). The second store extends the range
/// to [0, 2). The third makes a new range [3, 4). The fourth store joins the
/// two ranges into [0, 4) which is memset'able.
struct MemsetRange {
  // Start/End - A semi range that describes the span that this range covers.
  // The range is closed at the start and open at the end: [Start, End).
  int64_t Start, End;

  /// StartPtr - The getelementptr instruction that points to the start of the
  /// range.
  Value *StartPtr;

  /// Alignment - The known alignment of the first store.
  unsigned Alignment;

  /// TheStores - The actual stores that make up this range.
  SmallVector<Instruction*, 16> TheStores;

  bool isProfitableToUseMemset(const DataLayout &DL) const;
};

} // end anonymous namespace

bool MemsetRange::isProfitableToUseMemset(const DataLayout &DL) const {
  // If we found 4 or more stores to merge, or 16 or more bytes, use memset.
  if (TheStores.size() >= 4 || End-Start >= 16) return true;

  // If there is nothing to merge, don't do anything.
  if (TheStores.size() < 2) return false;

  // If any of the stores are a memset, then it is always good to extend the
  // memset.
  for (Instruction *SI : TheStores)
    if (!isa<StoreInst>(SI))
      return true;

  // Assume that the code generator is capable of merging pairs of stores
  // together if it wants to.
  if (TheStores.size() == 2) return false;

  // If we have fewer than 8 stores, it can still be worthwhile to do this.
  // For example, merging 4 i8 stores into an i32 store is useful almost always.
  // However, merging 2 32-bit stores isn't useful on a 32-bit architecture (the
  // memset will be split into 2 32-bit stores anyway) and doing so can
  // pessimize the llvm optimizer.
  //
  // Since we don't have perfect knowledge here, make some assumptions: assume
  // the maximum GPR width is the same size as the largest legal integer
  // size. If so, check to see whether we will end up actually reducing the
  // number of stores used.
  unsigned Bytes = unsigned(End-Start);
  unsigned MaxIntSize = DL.getLargestLegalIntTypeSizeInBits() / 8;
  if (MaxIntSize == 0)
    MaxIntSize = 1;
  unsigned NumPointerStores = Bytes / MaxIntSize;

  // Assume the remaining bytes if any are done a byte at a time.
  unsigned NumByteStores = Bytes % MaxIntSize;

  // If we will reduce the # stores (according to this heuristic), do the
  // transformation. This encourages merging 4 x i8 -> i32 and 2 x i16 -> i32
  // etc.
  return TheStores.size() > NumPointerStores+NumByteStores;
}

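// Worked example of the heuristic above (illustrative): three stores (an i32
// plus two i16s) covering bytes [0, 8) on a target whose largest legal
// integer is i32 give Bytes = 8, MaxIntSize = 4, NumPointerStores = 2 and
// NumByteStores = 0; since 3 > 2, lowering the range to a memset is a win.
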
namespace {

/// Keeps track of the set of byte ranges covered by stores/memsets, sorted by
/// start offset, so that adjacent or overlapping ranges can be merged.
class MemsetRanges {
  using range_iterator = SmallVectorImpl<MemsetRange>::iterator;

  /// A sorted list of the memset ranges.
  SmallVector<MemsetRange, 8> Ranges;

  const DataLayout &DL;

public:
  MemsetRanges(const DataLayout &DL) : DL(DL) {}

  using const_iterator = SmallVectorImpl<MemsetRange>::const_iterator;

  const_iterator begin() const { return Ranges.begin(); }
  const_iterator end() const { return Ranges.end(); }
  bool empty() const { return Ranges.empty(); }

  void addInst(int64_t OffsetFromFirst, Instruction *Inst) {
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst))
      addStore(OffsetFromFirst, SI);
    else
      addMemSet(OffsetFromFirst, cast<MemSetInst>(Inst));
  }

  void addStore(int64_t OffsetFromFirst, StoreInst *SI) {
    int64_t StoreSize = DL.getTypeStoreSize(SI->getOperand(0)->getType());

    addRange(OffsetFromFirst, StoreSize,
             SI->getPointerOperand(), SI->getAlignment(), SI);
  }

  void addMemSet(int64_t OffsetFromFirst, MemSetInst *MSI) {
    int64_t Size = cast<ConstantInt>(MSI->getLength())->getZExtValue();
    addRange(OffsetFromFirst, Size, MSI->getDest(), MSI->getDestAlignment(), MSI);
  }

  void addRange(int64_t Start, int64_t Size, Value *Ptr,
                unsigned Alignment, Instruction *Inst);
};

} // end anonymous namespace

/// Add a new store to the MemsetRanges data structure. This adds a
/// new range for the specified store at the specified offset, merging into
/// existing ranges as appropriate.
void MemsetRanges::addRange(int64_t Start, int64_t Size, Value *Ptr,
                            unsigned Alignment, Instruction *Inst) {
  int64_t End = Start+Size;

  range_iterator I = std::lower_bound(
      Ranges.begin(), Ranges.end(), Start,
      [](const MemsetRange &LHS, int64_t RHS) { return LHS.End < RHS; });

  // We now know that I == E, in which case we didn't find anything to merge
  // with, or that Start <= I->End. If End < I->Start or I == E, then we need
  // to insert a new range. Handle this now.
  if (I == Ranges.end() || End < I->Start) {
    MemsetRange &R = *Ranges.insert(I, MemsetRange());
    R.Start        = Start;
    R.End          = End;
    R.StartPtr     = Ptr;
    R.Alignment    = Alignment;
    R.TheStores.push_back(Inst);
    return;
  }

  // This store overlaps with I, add it.
  I->TheStores.push_back(Inst);

  // At this point, we may have an interval that completely contains our store.
  // If so, just add it to the interval and return.
  if (I->Start <= Start && I->End >= End)
    return;

  // Now we know that Start <= I->End and End >= I->Start so the range overlaps
  // but is not entirely contained within the range.

  // See if the range extends the start of the range. In this case, it couldn't
  // possibly cause it to join the prior range, because otherwise we would have
  // stopped on *it*.
  if (Start < I->Start) {
    I->Start = Start;
    I->StartPtr = Ptr;
    I->Alignment = Alignment;
  }

  // Now we know that Start <= I->End and Start >= I->Start (so the startpoint
  // is in or right at the end of I), and that End >= I->Start. Extend I out to
  // End.
  if (End > I->End) {
    I->End = End;
    range_iterator NextI = I;
    while (++NextI != Ranges.end() && End >= NextI->Start) {
      // Merge the range in.
      I->TheStores.append(NextI->TheStores.begin(), NextI->TheStores.end());
      if (NextI->End > I->End)
        I->End = NextI->End;
      Ranges.erase(NextI);
      NextI = I;
    }
  }
}

//===----------------------------------------------------------------------===//
//                         MemCpyOptLegacyPass Pass
//===----------------------------------------------------------------------===//

namespace {

class MemCpyOptLegacyPass : public FunctionPass {
  MemCpyOptPass Impl;

public:
  static char ID; // Pass identification, replacement for typeid

  MemCpyOptLegacyPass() : FunctionPass(ID) {
    initializeMemCpyOptLegacyPassPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

private:
  // This transformation requires dominator info.
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.setPreservesCFG();
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<MemoryDependenceWrapperPass>();
    AU.addRequired<AAResultsWrapperPass>();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<MemoryDependenceWrapperPass>();
  }
};

} // end anonymous namespace

char MemCpyOptLegacyPass::ID = 0;

/// The public interface to this file.
FunctionPass *llvm::createMemCpyOptPass() { return new MemCpyOptLegacyPass(); }

INITIALIZE_PASS_BEGIN(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(MemoryDependenceWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass)
INITIALIZE_PASS_DEPENDENCY(GlobalsAAWrapperPass)
INITIALIZE_PASS_END(MemCpyOptLegacyPass, "memcpyopt", "MemCpy Optimization",
                    false, false)
/// When scanning forward over instructions, we look for some other patterns to
/// fold away. In particular, this looks for stores to neighboring locations of
/// memory. If it sees enough consecutive ones, it attempts to merge them
/// together into a memcpy/memset.
Instruction *MemCpyOptPass::tryMergingIntoMemset(Instruction *StartInst,
                                                 Value *StartPtr,
                                                 Value *ByteVal) {
  const DataLayout &DL = StartInst->getModule()->getDataLayout();

  // Okay, so we now have a single store that can be splatable. Scan to find
  // all subsequent stores of the same value to offset from the same pointer.
  // Join these together into ranges, so we can decide whether contiguous blocks
  // are stored.
  MemsetRanges Ranges(DL);

  BasicBlock::iterator BI(StartInst);
  for (++BI; !BI->isTerminator(); ++BI) {
    if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
      // If the instruction is readnone, ignore it, otherwise bail out. We
      // don't even allow readonly here because we don't want something like:
      // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
      if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
        break;
      continue;
    }

    if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
      // If this is a store, see if we can merge it in.
      if (!NextStore->isSimple()) break;

      // Check to see if this stored value is of the same byte-splattable value.
      Value *StoredByte = isBytewiseValue(NextStore->getOperand(0));
      if (isa<UndefValue>(ByteVal) && StoredByte)
        ByteVal = StoredByte;
      if (ByteVal != StoredByte)
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(), Offset,
                           DL))
        break;

      Ranges.addStore(Offset, NextStore);
    } else {
      MemSetInst *MSI = cast<MemSetInst>(BI);

      if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
          !isa<ConstantInt>(MSI->getLength()))
        break;

      // Check to see if this store is to a constant offset from the start ptr.
      int64_t Offset;
      if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, DL))
        break;

      Ranges.addMemSet(Offset, MSI);
    }
  }

  // If we have no ranges, then we just had a single store with nothing that
  // could be merged in. This is a very common case of course.
  if (Ranges.empty())
    return nullptr;

  // If we had at least one store that could be merged in, add the starting
  // store as well. We try to avoid this unless there is at least something
  // interesting as a small compile-time optimization.
  Ranges.addInst(0, StartInst);

  // If we create any memsets, we put it right before the first instruction that
  // isn't part of the memset block. This ensure that the memset is dominated
  // by any addressing instruction needed by the start of the block.
  IRBuilder<> Builder(&*BI);

  // Now that we have full information about ranges, loop over the ranges and
  // emit memset's for anything big enough to be worthwhile.
  Instruction *AMemSet = nullptr;
  for (const MemsetRange &Range : Ranges) {
    if (Range.TheStores.size() == 1) continue;

    // If it is profitable to lower this range to memset, do so now.
    if (!Range.isProfitableToUseMemset(DL))
      continue;

    // Otherwise, we do want to transform this! Create a new memset.
    // Get the starting pointer of the block.
    StartPtr = Range.StartPtr;

    // Determine alignment.
    unsigned Alignment = Range.Alignment;
    if (Alignment == 0) {
      Type *EltType =
          cast<PointerType>(StartPtr->getType())->getElementType();
      Alignment = DL.getABITypeAlignment(EltType);
    }

    AMemSet =
        Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

    LLVM_DEBUG(dbgs() << "Replace stores:\n";
               for (Instruction *SI : Range.TheStores)
                 dbgs() << *SI << '\n';
               dbgs() << "With: " << *AMemSet << '\n');

    if (!Range.TheStores.empty())
      AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

    // Zap all the stores.
    for (Instruction *SI : Range.TheStores) {
      MD->removeInstruction(SI);
      SI->eraseFromParent();
    }

    ++NumMemSetInfer;
  }

  return AMemSet;
}

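// Illustrative before/after (assumed IR, not from the original source) for
// tryMergingIntoMemset:
//   store i8 0, i8* %p
//   %p1 = getelementptr i8, i8* %p, i64 1
//   store i8 0, i8* %p1
//   %p2 = getelementptr i8, i8* %p, i64 2
//   store i8 0, i8* %p2
//   %p3 = getelementptr i8, i8* %p, i64 3
//   store i8 0, i8* %p3
// can be merged into:
//   call void @llvm.memset.p0i8.i64(i8* %p, i8 0, i64 4, i1 false)
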
static unsigned findStoreAlignment(const DataLayout &DL, const StoreInst *SI) {
  unsigned StoreAlign = SI->getAlignment();
  if (!StoreAlign)
    StoreAlign = DL.getABITypeAlignment(SI->getOperand(0)->getType());
  return StoreAlign;
}

static unsigned findLoadAlignment(const DataLayout &DL, const LoadInst *LI) {
  unsigned LoadAlign = LI->getAlignment();
  if (!LoadAlign)
    LoadAlign = DL.getABITypeAlignment(LI->getType());
  return LoadAlign;
}

static unsigned findCommonAlignment(const DataLayout &DL, const StoreInst *SI,
                                    const LoadInst *LI) {
  unsigned StoreAlign = findStoreAlignment(DL, SI);
  unsigned LoadAlign = findLoadAlignment(DL, LI);
  return MinAlign(StoreAlign, LoadAlign);
}

// This method tries to lift a store instruction before position P.
// It will lift the store and its argument, plus anything that
// may alias with these.
// The method returns true if it was successful.
static bool moveUp(AliasAnalysis &AA, StoreInst *SI, Instruction *P,
                   const LoadInst *LI) {
  // If the store aliases this position, bail out early.
  MemoryLocation StoreLoc = MemoryLocation::get(SI);
  if (isModOrRefSet(AA.getModRefInfo(P, StoreLoc)))
    return false;

  // Keep track of the arguments of all instructions we plan to lift
  // so we can make sure to lift them as well if appropriate.
  DenseSet<Instruction*> Args;
  if (auto *Ptr = dyn_cast<Instruction>(SI->getPointerOperand()))
    if (Ptr->getParent() == SI->getParent())
      Args.insert(Ptr);

  // Instructions to lift before P.
  SmallVector<Instruction*, 8> ToLift;

  // Memory locations of lifted instructions.
  SmallVector<MemoryLocation, 8> MemLocs{StoreLoc};

  // Lifted calls.
  SmallVector<const CallBase *, 8> Calls;

  const MemoryLocation LoadLoc = MemoryLocation::get(LI);

  for (auto I = --SI->getIterator(), E = P->getIterator(); I != E; --I) {
    auto *C = &*I;

    bool MayAlias = isModOrRefSet(AA.getModRefInfo(C, None));

    bool NeedLift = false;
    if (Args.erase(C))
      NeedLift = true;
    else if (MayAlias) {
      NeedLift = llvm::any_of(MemLocs, [C, &AA](const MemoryLocation &ML) {
        return isModOrRefSet(AA.getModRefInfo(C, ML));
      });

      if (!NeedLift)
        NeedLift = llvm::any_of(Calls, [C, &AA](const CallBase *Call) {
          return isModOrRefSet(AA.getModRefInfo(C, Call));
        });
    }

    if (!NeedLift)
      continue;

    if (MayAlias) {
      // Since LI is implicitly moved downwards past the lifted instructions,
      // none of them may modify its source.
      if (isModSet(AA.getModRefInfo(C, LoadLoc)))
        return false;
      else if (const auto *Call = dyn_cast<CallBase>(C)) {
        // If we can't lift this before P, it's game over.
        if (isModOrRefSet(AA.getModRefInfo(P, Call)))
          return false;

        Calls.push_back(Call);
      } else if (isa<LoadInst>(C) || isa<StoreInst>(C) || isa<VAArgInst>(C)) {
        // If we can't lift this before P, it's game over.
        auto ML = MemoryLocation::get(C);
        if (isModOrRefSet(AA.getModRefInfo(P, ML)))
          return false;

        MemLocs.push_back(ML);
      } else
        // We don't know how to lift this instruction.
        return false;
    }

    ToLift.push_back(C);
    for (unsigned k = 0, e = C->getNumOperands(); k != e; ++k)
      if (auto *A = dyn_cast<Instruction>(C->getOperand(k)))
        if (A->getParent() == SI->getParent())
          Args.insert(A);
  }

  // We made it, we need to lift.
  for (auto *I : llvm::reverse(ToLift)) {
    LLVM_DEBUG(dbgs() << "Lifting " << *I << " before " << *P << "\n");
    I->moveBefore(P);
  }

  return true;
}

bool MemCpyOptPass::processStore(StoreInst *SI, BasicBlock::iterator &BBI) {
  if (!SI->isSimple()) return false;

  // Avoid merging nontemporal stores since the resulting
  // memcpy/memset would not be able to preserve the nontemporal hint.
  // In theory we could teach how to propagate the !nontemporal metadata to
  // memset calls. However, that change would force the backend to
  // conservatively expand !nontemporal memset calls back to sequences of
  // store instructions (effectively undoing the merging).
  if (SI->getMetadata(LLVMContext::MD_nontemporal))
    return false;

  const DataLayout &DL = SI->getModule()->getDataLayout();

  // Load to store forwarding can be interpreted as memcpy.
  if (LoadInst *LI = dyn_cast<LoadInst>(SI->getOperand(0))) {
    if (LI->isSimple() && LI->hasOneUse() &&
        LI->getParent() == SI->getParent()) {

      auto *T = LI->getType();
      if (T->isAggregateType()) {
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation LoadLoc = MemoryLocation::get(LI);

        // We use alias analysis to check if an instruction may store to
        // the memory we load from in between the load and the store. If
        // such an instruction is found, we try to promote there instead
        // of at the store position.
        Instruction *P = SI;
        for (auto &I : make_range(++LI->getIterator(), SI->getIterator())) {
          if (isModSet(AA.getModRefInfo(&I, LoadLoc))) {
            P = &I;
            break;
          }
        }

        // We found an instruction that may write to the loaded memory.
        // We can try to promote at this position instead of the store
        // position if nothing aliases the store memory after this and the
        // store destination is not in the range.
        if (P && P != SI) {
          if (!moveUp(AA, SI, P, LI))
            P = nullptr;
        }

        // If a valid insertion position is found, then we can promote
        // the load/store pair to a memcpy.
        if (P) {
          // If we load from memory that may alias the memory we store to,
          // memmove must be used to preserve semantics. If not, memcpy can
          // be used.
          bool UseMemMove = false;
          if (!AA.isNoAlias(MemoryLocation::get(SI), LoadLoc))
            UseMemMove = true;

          uint64_t Size = DL.getTypeStoreSize(T);

          IRBuilder<> Builder(P);
          Instruction *M;
          if (UseMemMove)
            M = Builder.CreateMemMove(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);
          else
            M = Builder.CreateMemCpy(
                SI->getPointerOperand(), findStoreAlignment(DL, SI),
                LI->getPointerOperand(), findLoadAlignment(DL, LI), Size);

          LLVM_DEBUG(dbgs() << "Promoting " << *LI << " to " << *SI << " => "
                            << *M << "\n");

          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;

          // Make sure we do not invalidate the iterator.
          BBI = M->getIterator();
          return true;
        }
      }

      // Detect cases where we're performing call slot forwarding, but
      // happen to be using a load-store pair to implement it, rather than
      // a memcpy.
      MemDepResult ldep = MD->getDependency(LI);
      CallInst *C = nullptr;
      if (ldep.isClobber() && !isa<MemCpyInst>(ldep.getInst()))
        C = dyn_cast<CallInst>(ldep.getInst());

      if (C) {
        // Check that nothing touches the dest of the "copy" between
        // the call and the store.
        Value *CpyDest = SI->getPointerOperand()->stripPointerCasts();
        bool CpyDestIsLocal = isa<AllocaInst>(CpyDest);
        AliasAnalysis &AA = LookupAliasAnalysis();
        MemoryLocation StoreLoc = MemoryLocation::get(SI);
        for (BasicBlock::iterator I = --SI->getIterator(), E = C->getIterator();
             I != E; --I) {
          if (isModOrRefSet(AA.getModRefInfo(&*I, StoreLoc))) {
            C = nullptr;
            break;
          }
          // The store to dest may never happen if an exception can be thrown
          // between the load and the store.
          if (I->mayThrow() && !CpyDestIsLocal) {
            C = nullptr;
            break;
          }
        }
      }

      if (C) {
        bool changed = performCallSlotOptzn(
            LI, SI->getPointerOperand()->stripPointerCasts(),
            LI->getPointerOperand()->stripPointerCasts(),
            DL.getTypeStoreSize(SI->getOperand(0)->getType()),
            findCommonAlignment(DL, SI, LI), C);
        if (changed) {
          MD->removeInstruction(SI);
          SI->eraseFromParent();
          MD->removeInstruction(LI);
          LI->eraseFromParent();
          ++NumMemCpyInstr;
          return true;
        }
      }
    }
  }

  // There are two cases that are interesting for this code to handle: memcpy
  // and memset. Right now we only handle memset.
  //
  // Ensure that the value being stored is something that can be memset'able a
  // byte at a time like "0" or "-1" or any width, as well as things like
  // 0xA0A0A0A0 and 0.0.
  auto *V = SI->getOperand(0);
  if (Value *ByteVal = isBytewiseValue(V)) {
    if (Instruction *I = tryMergingIntoMemset(SI, SI->getPointerOperand(),
                                              ByteVal)) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }

    // If we have an aggregate, we try to promote it to memset regardless
    // of opportunity for merging as it can expose optimization opportunities
    // in subsequent passes.
    auto *T = V->getType();
    if (T->isAggregateType()) {
      uint64_t Size = DL.getTypeStoreSize(T);
      unsigned Align = SI->getAlignment();
      if (!Align)
        Align = DL.getABITypeAlignment(T);
      IRBuilder<> Builder(SI);
      auto *M =
          Builder.CreateMemSet(SI->getPointerOperand(), ByteVal, Size, Align);

      LLVM_DEBUG(dbgs() << "Promoting " << *SI << " to " << *M << "\n");

      MD->removeInstruction(SI);
      SI->eraseFromParent();
      NumMemSetInfer++;

      // Make sure we do not invalidate the iterator.
      BBI = M->getIterator();
      return true;
    }
  }

  return false;
}

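// Illustrative before/after (assumed IR) for the aggregate load/store
// promotion above:
//   %v = load %struct.S, %struct.S* %src
//   store %struct.S %v, %struct.S* %dst
// becomes, when the two locations are known not to alias:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %dst.i8, i8* %src.i8, i64 N, i1 false)
// (a memmove is emitted instead when the locations may alias).
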
bool MemCpyOptPass::processMemSet(MemSetInst *MSI, BasicBlock::iterator &BBI) {
  // See if there is another memset or store neighboring this memset which
  // allows us to widen out the memset to do a single larger store.
  if (isa<ConstantInt>(MSI->getLength()) && !MSI->isVolatile())
    if (Instruction *I = tryMergingIntoMemset(MSI, MSI->getDest(),
                                              MSI->getValue())) {
      BBI = I->getIterator(); // Don't invalidate iterator.
      return true;
    }
  return false;
}

/// Takes a memcpy and a call that it depends on,
/// and checks for the possibility of a call slot optimization by having
/// the call write its result directly into the destination of the memcpy.
bool MemCpyOptPass::performCallSlotOptzn(Instruction *cpy, Value *cpyDest,
                                         Value *cpySrc, uint64_t cpyLen,
                                         unsigned cpyAlign, CallInst *C) {
  // The general transformation to keep in mind is
  //
  //   call @func(..., src, ...)
  //   memcpy(dest, src, ...)
  //
  // ->
  //
  //   memcpy(dest, src, ...)
  //   call @func(..., dest, ...)
  //
  // Since moving the memcpy is technically awkward, we additionally check that
  // src only holds uninitialized values at the moment of the call, meaning that
  // the memcpy can be discarded rather than moved.

  // Lifetime marks shouldn't be operated on.
  if (Function *F = C->getCalledFunction())
    if (F->isIntrinsic() && F->getIntrinsicID() == Intrinsic::lifetime_start)
      return false;

  // Deliberately get the source and destination with bitcasts stripped away,
  // because we'll need to do type comparisons based on the underlying type.
  CallSite CS(C);

  // Require that src be an alloca. This simplifies the reasoning considerably.
  AllocaInst *srcAlloca = dyn_cast<AllocaInst>(cpySrc);
  if (!srcAlloca)
    return false;

  ConstantInt *srcArraySize = dyn_cast<ConstantInt>(srcAlloca->getArraySize());
  if (!srcArraySize)
    return false;

  const DataLayout &DL = cpy->getModule()->getDataLayout();
  uint64_t srcSize = DL.getTypeAllocSize(srcAlloca->getAllocatedType()) *
                     srcArraySize->getZExtValue();

  if (cpyLen < srcSize)
    return false;

  // Check that accessing the first srcSize bytes of dest will not cause a
  // trap. Otherwise the transform is invalid since it might cause a trap
  // to occur earlier than it otherwise would.
  if (AllocaInst *A = dyn_cast<AllocaInst>(cpyDest)) {
    // The destination is an alloca. Check it is larger than srcSize.
    ConstantInt *destArraySize = dyn_cast<ConstantInt>(A->getArraySize());
    if (!destArraySize)
      return false;

    uint64_t destSize = DL.getTypeAllocSize(A->getAllocatedType()) *
                        destArraySize->getZExtValue();

    if (destSize < srcSize)
      return false;
  } else if (Argument *A = dyn_cast<Argument>(cpyDest)) {
    // The store to dest may never happen if the call can throw.
    if (C->mayThrow())
      return false;

    if (A->getDereferenceableBytes() < srcSize) {
      // If the destination is an sret parameter then only accesses that are
      // outside of the returned struct type can trap.
      if (!A->hasStructRetAttr())
        return false;

      Type *StructTy = cast<PointerType>(A->getType())->getElementType();
      if (!StructTy->isSized()) {
        // The call may never return and hence the copy-instruction may never
        // be executed, and therefore it's not safe to say "the destination
        // has at least <cpyLen> bytes, as implied by the copy-instruction".
        return false;
      }

      uint64_t destSize = DL.getTypeAllocSize(StructTy);
      if (destSize < srcSize)
        return false;
    }
  } else {
    return false;
  }

  // Check that dest points to memory that is at least as aligned as src.
  unsigned srcAlign = srcAlloca->getAlignment();
  if (!srcAlign)
    srcAlign = DL.getABITypeAlignment(srcAlloca->getAllocatedType());
  bool isDestSufficientlyAligned = srcAlign <= cpyAlign;
  // If dest is not aligned enough and we can't increase its alignment then
  // bail out.
  if (!isDestSufficientlyAligned && !isa<AllocaInst>(cpyDest))
    return false;

  // Check that src is not accessed except via the call and the memcpy. This
  // guarantees that it holds only undefined values when passed in (so the final
  // memcpy can be dropped), that it is not read or written between the call and
  // the memcpy, and that writing beyond the end of it is undefined.
  SmallVector<User*, 8> srcUseList(srcAlloca->user_begin(),
                                   srcAlloca->user_end());
  while (!srcUseList.empty()) {
    User *U = srcUseList.pop_back_val();

    if (isa<BitCastInst>(U) || isa<AddrSpaceCastInst>(U)) {
      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (GetElementPtrInst *G = dyn_cast<GetElementPtrInst>(U)) {
      if (!G->hasAllZeroIndices())
        return false;

      for (User *UU : U->users())
        srcUseList.push_back(UU);
      continue;
    }
    if (const IntrinsicInst *IT = dyn_cast<IntrinsicInst>(U))
      if (IT->isLifetimeStartOrEnd())
        continue;

    if (U != C && U != cpy)
      return false;
  }

  // Check that src isn't captured by the called function since the
  // transformation can cause aliasing issues in that case.
  for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
    if (CS.getArgument(i) == cpySrc && !CS.doesNotCapture(i))
      return false;

  // Since we're changing the parameter to the callsite, we need to make sure
  // that what would be the new parameter dominates the callsite.
  DominatorTree &DT = LookupDomTree();
  if (Instruction *cpyDestInst = dyn_cast<Instruction>(cpyDest))
    if (!DT.dominates(cpyDestInst, C))
      return false;

  // In addition to knowing that the call does not access src in some
  // unexpected manner, for example via a global, which we deduce from
  // the use analysis, we also need to know that it does not sneakily
  // access dest. We rely on AA to figure this out for us.
  AliasAnalysis &AA = LookupAliasAnalysis();
  ModRefInfo MR = AA.getModRefInfo(C, cpyDest, LocationSize::precise(srcSize));
  // If necessary, perform additional analysis.
  if (isModOrRefSet(MR))
    MR = AA.callCapturesBefore(C, cpyDest, LocationSize::precise(srcSize), &DT);
  if (isModOrRefSet(MR))
    return false;

  // We can't create address space casts here because we don't know if they're
  // safe for the target.
  if (cpySrc->getType()->getPointerAddressSpace() !=
      cpyDest->getType()->getPointerAddressSpace())
    return false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc &&
        cpySrc->getType()->getPointerAddressSpace() !=
        CS.getArgument(i)->getType()->getPointerAddressSpace())
      return false;

  // All the checks have passed, so do the transformation.
  bool changedArgument = false;
  for (unsigned i = 0; i < CS.arg_size(); ++i)
    if (CS.getArgument(i)->stripPointerCasts() == cpySrc) {
      Value *Dest = cpySrc->getType() == cpyDest->getType() ? cpyDest
        : CastInst::CreatePointerCast(cpyDest, cpySrc->getType(),
                                      cpyDest->getName(), C);
      changedArgument = true;
      if (CS.getArgument(i)->getType() == Dest->getType())
        CS.setArgument(i, Dest);
      else
        CS.setArgument(i, CastInst::CreatePointerCast(Dest,
                          CS.getArgument(i)->getType(), Dest->getName(), C));
    }

  if (!changedArgument)
    return false;

  // If the destination wasn't sufficiently aligned then increase its alignment.
  if (!isDestSufficientlyAligned) {
    assert(isa<AllocaInst>(cpyDest) && "Can only increase alloca alignment!");
    cast<AllocaInst>(cpyDest)->setAlignment(srcAlign);
  }

  // Drop any cached information about the call, because we may have changed
  // its dependence information by changing its parameter.
  MD->removeInstruction(C);

  // Update AA metadata.
  // FIXME: MD_tbaa_struct and MD_mem_parallel_loop_access should also be
  // handled here, but combineMetadata doesn't support them yet.
  unsigned KnownIDs[] = {LLVMContext::MD_tbaa, LLVMContext::MD_alias_scope,
                         LLVMContext::MD_noalias,
                         LLVMContext::MD_invariant_group,
                         LLVMContext::MD_access_group};
  combineMetadata(C, cpy, KnownIDs, true);

  // Remove the memcpy.
  MD->removeInstruction(cpy);
  ++NumMemCpyInstr;

  return true;
}

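// Illustrative instance of the call slot optimization (assumed IR; the sret
// callee name is invented for illustration):
//   %tmp = alloca %struct.S
//   call void @produce(%struct.S* sret %tmp)
//   ; ... memcpy(%dst <- %tmp, sizeof(%struct.S)) ...
// When all of the checks above pass, the call is rewritten to write directly
// into %dst and the memcpy is deleted.
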
/// We've found that the (upward scanning) memory dependence of memcpy 'M' is
/// the memcpy 'MDep'. Try to simplify M to copy from MDep's input if we can.
bool MemCpyOptPass::processMemCpyMemCpyDependence(MemCpyInst *M,
                                                  MemCpyInst *MDep) {
  // We can only transform memcpy's where the dest of one is the source of the
  // other.
  if (M->getSource() != MDep->getDest() || MDep->isVolatile())
    return false;

  // If dep instruction is reading from our current input, then it is a noop
  // transfer and substituting the input won't change this instruction. Just
  // ignore the input and let someone else zap MDep. This handles cases like:
  //    memcpy(a <- a)
  //    memcpy(b <- a)
  if (M->getSource() == MDep->getSource())
    return false;

  // Second, the length of the memcpy's must be the same, or the preceding one
  // must be larger than the following one.
  ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength());
  ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength());
  if (!MDepLen || !MLen || MDepLen->getZExtValue() < MLen->getZExtValue())
    return false;

  AliasAnalysis &AA = LookupAliasAnalysis();

  // Verify that the copied-from memory doesn't change in between the two
  // transfers. For example, in:
  //    memcpy(a <- b)
  //    *b = 42;
  //    memcpy(c <- a)
  // It would be invalid to transform the second memcpy into memcpy(c <- b).
  //
  // TODO: If the code between M and MDep is transparent to the destination "c",
  // then we could still perform the xform by moving M up to the first memcpy.
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep =
      MD->getPointerDependencyFrom(MemoryLocation::getForSource(MDep), false,
                                   M->getIterator(), M->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  // If the dest of the second might alias the source of the first, then the
  // source and dest might overlap. We still want to eliminate the intermediate
  // value, but we have to generate a memmove instead of memcpy.
  bool UseMemMove = false;
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(MDep)))
    UseMemMove = true;

  // If all checks passed, then we can transform M.
  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy->memcpy src:\n"
                    << *MDep << '\n' << *M << '\n');

  // TODO: Is this worth it if we're creating a less aligned memcpy? For
  // example we could be moving from movaps -> movq on x86.
  IRBuilder<> Builder(M);
  if (UseMemMove)
    Builder.CreateMemMove(M->getRawDest(), M->getDestAlignment(),
                          MDep->getRawSource(), MDep->getSourceAlignment(),
                          M->getLength(), M->isVolatile());
  else
    Builder.CreateMemCpy(M->getRawDest(), M->getDestAlignment(),
                         MDep->getRawSource(), MDep->getSourceAlignment(),
                         M->getLength(), M->isVolatile());

  // Remove the instruction we're replacing.
  MD->removeInstruction(M);
  M->eraseFromParent();
  ++NumMemCpyInstr;
  return true;
}

/// We've found that the (upward scanning) memory dependence of \p MemCpy is
/// \p MemSet. Try to simplify \p MemSet to only set the trailing bytes that
/// weren't copied over by \p MemCpy.
///
/// In other words, transform:
/// \code
///   memset(dst, c, dst_size);
///   memcpy(dst, src, src_size);
/// \endcode
/// into:
/// \code
///   memcpy(dst, src, src_size);
///   memset(dst + src_size, c, dst_size <= src_size ? 0 : dst_size - src_size);
/// \endcode
bool MemCpyOptPass::processMemSetMemCpyDependence(MemCpyInst *MemCpy,
                                                  MemSetInst *MemSet) {
  // We can only transform memset/memcpy with the same destination.
  if (MemSet->getDest() != MemCpy->getDest())
    return false;

  // Check that there are no other dependencies on the memset destination.
  MemDepResult DstDepInfo =
      MD->getPointerDependencyFrom(MemoryLocation::getForDest(MemSet), false,
                                   MemCpy->getIterator(), MemCpy->getParent());
  if (DstDepInfo.getInst() != MemSet)
    return false;

  // Use the same i8* dest as the memcpy, killing the memset dest if different.
  Value *Dest = MemCpy->getRawDest();
  Value *DestSize = MemSet->getLength();
  Value *SrcSize = MemCpy->getLength();

  // By default, create an unaligned memset.
  unsigned Align = 1;
  // If Dest is aligned, and SrcSize is constant, use the minimum alignment
  // of the sum.
  const unsigned DestAlign =
      std::max(MemSet->getDestAlignment(), MemCpy->getDestAlignment());
  if (DestAlign > 1)
    if (ConstantInt *SrcSizeC = dyn_cast<ConstantInt>(SrcSize))
      Align = MinAlign(SrcSizeC->getZExtValue(), DestAlign);

  IRBuilder<> Builder(MemCpy);

  // If the sizes have different types, zext the smaller one.
  if (DestSize->getType() != SrcSize->getType()) {
    if (DestSize->getType()->getIntegerBitWidth() >
        SrcSize->getType()->getIntegerBitWidth())
      SrcSize = Builder.CreateZExt(SrcSize, DestSize->getType());
    else
      DestSize = Builder.CreateZExt(DestSize, SrcSize->getType());
  }

  Value *Ule = Builder.CreateICmpULE(DestSize, SrcSize);
  Value *SizeDiff = Builder.CreateSub(DestSize, SrcSize);
  Value *MemsetLen = Builder.CreateSelect(
      Ule, ConstantInt::getNullValue(DestSize->getType()), SizeDiff);
  Builder.CreateMemSet(
      Builder.CreateGEP(Dest->getType()->getPointerElementType(), Dest,
                        SrcSize),
      MemSet->getOperand(1), MemsetLen, Align);

  MD->removeInstruction(MemSet);
  MemSet->eraseFromParent();
  return true;
}

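// Concrete constant-size instance of the transformation above (illustrative):
//   memset(dst, 0, 16);
//   memcpy(dst, src, 8);
// becomes:
//   memcpy(dst, src, 8);
//   memset(dst + 8, 0, 8);
// since DestSize (16) > SrcSize (8), the select folds MemsetLen to 8.
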
/// Determine whether the instruction has undefined content for the given Size,
/// either because it was freshly alloca'd or started its lifetime.
static bool hasUndefContents(Instruction *I, ConstantInt *Size) {
  if (isa<AllocaInst>(I))
    return true;

  if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
    if (II->getIntrinsicID() == Intrinsic::lifetime_start)
      if (ConstantInt *LTSize = dyn_cast<ConstantInt>(II->getArgOperand(0)))
        if (LTSize->getZExtValue() >= Size->getZExtValue())
          return true;

  return false;
}

/// Transform memcpy to memset when its source was just memset.
/// In other words, turn:
/// \code
///   memset(dst1, c, dst1_size);
///   memcpy(dst2, dst1, dst2_size);
/// \endcode
/// into:
/// \code
///   memset(dst1, c, dst1_size);
///   memset(dst2, c, dst2_size);
/// \endcode
/// When dst2_size <= dst1_size.
///
/// The \p MemCpy must have a Constant length.
bool MemCpyOptPass::performMemCpyToMemSetOptzn(MemCpyInst *MemCpy,
                                               MemSetInst *MemSet) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  // Make sure that memcpy(..., memset(...), ...), that is we are memsetting and
  // memcpying from the same address. Otherwise it is hard to reason about.
  if (!AA.isMustAlias(MemSet->getRawDest(), MemCpy->getRawSource()))
    return false;

  // A known memset size is required.
  ConstantInt *MemSetSize = dyn_cast<ConstantInt>(MemSet->getLength());
  if (!MemSetSize)
    return false;

  // Make sure the memcpy doesn't read any more than what the memset wrote.
  // Don't worry about sizes larger than i64.
  ConstantInt *CopySize = cast<ConstantInt>(MemCpy->getLength());
  if (CopySize->getZExtValue() > MemSetSize->getZExtValue()) {
    // If the memcpy is larger than the memset, but the memory was undef prior
    // to the memset, we can just ignore the tail. Technically we're only
    // interested in the bytes from MemSetSize..CopySize here, but as we can't
    // easily represent this location, we use the full 0..CopySize range.
    MemoryLocation MemCpyLoc = MemoryLocation::getForSource(MemCpy);
    MemDepResult DepInfo = MD->getPointerDependencyFrom(
        MemCpyLoc, true, MemSet->getIterator(), MemSet->getParent());
    if (DepInfo.isDef() && hasUndefContents(DepInfo.getInst(), CopySize))
      CopySize = MemSetSize;
    else
      return false;
  }

  IRBuilder<> Builder(MemCpy);
  Builder.CreateMemSet(MemCpy->getRawDest(), MemSet->getOperand(1),
                       CopySize, MemCpy->getDestAlignment());
  return true;
}

/// Perform simplification of memcpy's. If we have memcpy A
/// which copies X to Y, and memcpy B which copies Y to Z, then we can rewrite
/// B to be a memcpy from X to Z (or potentially a memmove, depending on
/// circumstances). This allows later passes to remove the first memcpy
/// altogether.
bool MemCpyOptPass::processMemCpy(MemCpyInst *M) {
  // We can only optimize non-volatile memcpy's.
  if (M->isVolatile()) return false;

  // If the source and destination of the memcpy are the same, then zap it.
  if (M->getSource() == M->getDest()) {
    MD->removeInstruction(M);
    M->eraseFromParent();
    return false;
  }

  // If copying from a constant, try to turn the memcpy into a memset.
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(M->getSource()))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Value *ByteVal = isBytewiseValue(GV->getInitializer())) {
        IRBuilder<> Builder(M);
        Builder.CreateMemSet(M->getRawDest(), ByteVal, M->getLength(),
                             M->getDestAlignment(), false);
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  MemDepResult DepInfo = MD->getDependency(M);

  // Try to turn a partially redundant memset + memcpy into
  // memcpy + smaller memset. We don't need the memcpy size for this.
  if (DepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(DepInfo.getInst()))
      if (processMemSetMemCpyDependence(M, MDep))
        return true;

  // The optimizations after this point require the memcpy size.
  ConstantInt *CopySize = dyn_cast<ConstantInt>(M->getLength());
  if (!CopySize) return false;

  // There are four possible optimizations we can do for memcpy:
  //   a) memcpy-memcpy xform which exposes redundance for DSE.
  //   b) call-memcpy xform for return slot optimization.
  //   c) memcpy from freshly alloca'd space or space that has just started its
  //      lifetime copies undefined data, and we can therefore eliminate the
  //      memcpy in favor of the data that was already at the destination.
  //   d) memcpy from a just-memset'd source can be turned into memset.
  if (DepInfo.isClobber()) {
    if (CallInst *C = dyn_cast<CallInst>(DepInfo.getInst())) {
      // FIXME: Can we pass in either of dest/src alignment here instead
      // of conservatively taking the minimum?
      unsigned Align = MinAlign(M->getDestAlignment(), M->getSourceAlignment());
      if (performCallSlotOptzn(M, M->getDest(), M->getSource(),
                               CopySize->getZExtValue(), Align,
                               C)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        return true;
      }
    }
  }

  MemoryLocation SrcLoc = MemoryLocation::getForSource(M);
  MemDepResult SrcDepInfo = MD->getPointerDependencyFrom(
      SrcLoc, true, M->getIterator(), M->getParent());

  if (SrcDepInfo.isClobber()) {
    if (MemCpyInst *MDep = dyn_cast<MemCpyInst>(SrcDepInfo.getInst()))
      return processMemCpyMemCpyDependence(M, MDep);
  } else if (SrcDepInfo.isDef()) {
    if (hasUndefContents(SrcDepInfo.getInst(), CopySize)) {
      MD->removeInstruction(M);
      M->eraseFromParent();
      ++NumMemCpyInstr;
      return true;
    }
  }

  if (SrcDepInfo.isClobber())
    if (MemSetInst *MDep = dyn_cast<MemSetInst>(SrcDepInfo.getInst()))
      if (performMemCpyToMemSetOptzn(M, MDep)) {
        MD->removeInstruction(M);
        M->eraseFromParent();
        ++NumCpyToSet;
        return true;
      }

  return false;
}

/// Transforms memmove calls to memcpy calls when the src/dst are guaranteed
/// not to alias.
bool MemCpyOptPass::processMemMove(MemMoveInst *M) {
  AliasAnalysis &AA = LookupAliasAnalysis();

  if (!TLI->has(LibFunc_memmove))
    return false;

  // See if the pointers alias.
  if (!AA.isNoAlias(MemoryLocation::getForDest(M),
                    MemoryLocation::getForSource(M)))
    return false;

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Optimizing memmove -> memcpy: " << *M
                    << "\n");

  // If not, then we know we can transform this.
  Type *ArgTys[3] = { M->getRawDest()->getType(),
                      M->getRawSource()->getType(),
                      M->getLength()->getType() };
  M->setCalledFunction(Intrinsic::getDeclaration(M->getModule(),
                                                 Intrinsic::memcpy, ArgTys));

  // MemDep may have overly conservative information about this instruction;
  // just conservatively flush it from the cache.
  MD->removeInstruction(M);

  ++NumMoveToCpy;
  return true;
}

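// Illustrative instance (assumed IR): when %a and %b are known not to alias,
// e.g. two distinct allocas,
//   call void @llvm.memmove.p0i8.p0i8.i64(i8* %a, i8* %b, i64 8, i1 false)
// is rewritten in place to:
//   call void @llvm.memcpy.p0i8.p0i8.i64(i8* %a, i8* %b, i64 8, i1 false)
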
/// This is called on every byval argument in call sites.
bool MemCpyOptPass::processByValArgument(CallSite CS, unsigned ArgNo) {
  const DataLayout &DL = CS.getCaller()->getParent()->getDataLayout();
  // Find out what feeds this byval argument.
  Value *ByValArg = CS.getArgument(ArgNo);
  Type *ByValTy = cast<PointerType>(ByValArg->getType())->getElementType();
  uint64_t ByValSize = DL.getTypeAllocSize(ByValTy);
  MemDepResult DepInfo = MD->getPointerDependencyFrom(
      MemoryLocation(ByValArg, LocationSize::precise(ByValSize)), true,
      CS.getInstruction()->getIterator(), CS.getInstruction()->getParent());
  if (!DepInfo.isClobber())
    return false;

  // If the byval argument isn't fed by a memcpy, ignore it. If it is fed by
  // a memcpy, see if we can byval from the source of the memcpy instead of the
  // result.
  MemCpyInst *MDep = dyn_cast<MemCpyInst>(DepInfo.getInst());
  if (!MDep || MDep->isVolatile() ||
      ByValArg->stripPointerCasts() != MDep->getDest())
    return false;

  // The length of the memcpy must be larger or equal to the size of the byval.
  ConstantInt *C1 = dyn_cast<ConstantInt>(MDep->getLength());
  if (!C1 || C1->getValue().getZExtValue() < ByValSize)
    return false;

  // Get the alignment of the byval. If the call doesn't specify the alignment,
  // then it is some target specific value that we can't know.
  unsigned ByValAlign = CS.getParamAlignment(ArgNo);
  if (ByValAlign == 0) return false;

  // If it is greater than the memcpy, then we check to see if we can force the
  // source of the memcpy to the alignment we need. If we fail, we bail out.
  AssumptionCache &AC = LookupAssumptionCache();
  DominatorTree &DT = LookupDomTree();
  if (MDep->getSourceAlignment() < ByValAlign &&
      getOrEnforceKnownAlignment(MDep->getSource(), ByValAlign, DL,
                                 CS.getInstruction(), &AC, &DT) < ByValAlign)
    return false;

  // The address space of the memcpy source must match the byval argument.
  if (MDep->getSource()->getType()->getPointerAddressSpace() !=
      ByValArg->getType()->getPointerAddressSpace())
    return false;

  // Verify that the copied-from memory doesn't change in between the memcpy and
  // the byval call.
  //    memcpy(a <- b)
  //    *b = 42;
  //    foo(*a)
  // It would be invalid to transform the second memcpy into foo(*b).
  //
  // NOTE: This is conservative, it will stop on any read from the source loc,
  // not just the defining memcpy.
  MemDepResult SourceDep = MD->getPointerDependencyFrom(
      MemoryLocation::getForSource(MDep), false,
      CS.getInstruction()->getIterator(), MDep->getParent());
  if (!SourceDep.isClobber() || SourceDep.getInst() != MDep)
    return false;

  Value *TmpCast = MDep->getSource();
  if (MDep->getSource()->getType() != ByValArg->getType())
    TmpCast = new BitCastInst(MDep->getSource(), ByValArg->getType(),
                              "tmpcast", CS.getInstruction());

  LLVM_DEBUG(dbgs() << "MemCpyOptPass: Forwarding memcpy to byval:\n"
                    << "  " << *MDep << "\n"
                    << "  " << *CS.getInstruction() << "\n");

  // Otherwise we're good! Update the byval argument.
  CS.setArgument(ArgNo, TmpCast);
  return true;
}

/// Executes one iteration of MemCpyOptPass.
bool MemCpyOptPass::iterateOnFunction(Function &F) {
  bool MadeChange = false;

  DominatorTree &DT = LookupDomTree();

  // Walk all instructions in the function.
  for (BasicBlock &BB : F) {
    // Skip unreachable blocks. For example processStore assumes that an
    // instruction in a BB can't be dominated by a later instruction in the
    // same BB (which is a scenario that can happen for an unreachable BB that
    // has itself as a predecessor).
    if (!DT.isReachableFromEntry(&BB))
      continue;

    for (BasicBlock::iterator BI = BB.begin(), BE = BB.end(); BI != BE;) {
      // Avoid invalidating the iterator.
      Instruction *I = &*BI++;

      bool RepeatInstruction = false;

      if (StoreInst *SI = dyn_cast<StoreInst>(I))
        MadeChange |= processStore(SI, BI);
      else if (MemSetInst *M = dyn_cast<MemSetInst>(I))
        RepeatInstruction = processMemSet(M, BI);
      else if (MemCpyInst *M = dyn_cast<MemCpyInst>(I))
        RepeatInstruction = processMemCpy(M);
      else if (MemMoveInst *M = dyn_cast<MemMoveInst>(I))
        RepeatInstruction = processMemMove(M);
      else if (auto CS = CallSite(I)) {
        for (unsigned i = 0, e = CS.arg_size(); i != e; ++i)
          if (CS.isByValArgument(i))
            MadeChange |= processByValArgument(CS, i);
      }

      // Reprocess the instruction if desired.
      if (RepeatInstruction) {
        if (BI != BB.begin())
          --BI;
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

MemCpyOptPass::run(Function
&F
, FunctionAnalysisManager
&AM
) {
1459 auto &MD
= AM
.getResult
<MemoryDependenceAnalysis
>(F
);
1460 auto &TLI
= AM
.getResult
<TargetLibraryAnalysis
>(F
);
1462 auto LookupAliasAnalysis
= [&]() -> AliasAnalysis
& {
1463 return AM
.getResult
<AAManager
>(F
);
1465 auto LookupAssumptionCache
= [&]() -> AssumptionCache
& {
1466 return AM
.getResult
<AssumptionAnalysis
>(F
);
1468 auto LookupDomTree
= [&]() -> DominatorTree
& {
1469 return AM
.getResult
<DominatorTreeAnalysis
>(F
);
1472 bool MadeChange
= runImpl(F
, &MD
, &TLI
, LookupAliasAnalysis
,
1473 LookupAssumptionCache
, LookupDomTree
);
1475 return PreservedAnalyses::all();
1477 PreservedAnalyses PA
;
1478 PA
.preserveSet
<CFGAnalyses
>();
1479 PA
.preserve
<GlobalsAA
>();
1480 PA
.preserve
<MemoryDependenceAnalysis
>();
bool MemCpyOptPass::runImpl(
    Function &F, MemoryDependenceResults *MD_, TargetLibraryInfo *TLI_,
    std::function<AliasAnalysis &()> LookupAliasAnalysis_,
    std::function<AssumptionCache &()> LookupAssumptionCache_,
    std::function<DominatorTree &()> LookupDomTree_) {
  bool MadeChange = false;
  MD = MD_;
  TLI = TLI_;
  LookupAliasAnalysis = std::move(LookupAliasAnalysis_);
  LookupAssumptionCache = std::move(LookupAssumptionCache_);
  LookupDomTree = std::move(LookupDomTree_);

  // If we don't have at least memset and memcpy, there is little point of doing
  // anything here. These are required by a freestanding implementation, so if
  // even they are disabled, there is no point in trying hard.
  if (!TLI->has(LibFunc_memset) || !TLI->has(LibFunc_memcpy))
    return false;

  while (true) {
    if (!iterateOnFunction(F))
      break;
    MadeChange = true;
  }

  MD = nullptr;
  return MadeChange;
}

/// This is the main transformation entry point for a function.
bool MemCpyOptLegacyPass::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto *MD = &getAnalysis<MemoryDependenceWrapperPass>().getMemDep();
  auto *TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI();

  auto LookupAliasAnalysis = [this]() -> AliasAnalysis & {
    return getAnalysis<AAResultsWrapperPass>().getAAResults();
  };
  auto LookupAssumptionCache = [this, &F]() -> AssumptionCache & {
    return getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  };
  auto LookupDomTree = [this]() -> DominatorTree & {
    return getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  };

  return Impl.runImpl(F, MD, TLI, LookupAliasAnalysis, LookupAssumptionCache,
                      LookupDomTree);
}