//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;
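/// Return true if the known alignment of \p Base is at least \p Alignment and
/// the constant \p Offset is a multiple of that known alignment, so that
/// Base + Offset is also aligned to at least \p Alignment.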
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  return BA >= Alignment && Offset.isAligned(BA);
}
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.
  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;
    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
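    //
    // For example, with Alignment = 8, a base that is 8-byte aligned and
    // dereferenceable for Offset + Size = 16 + 4 bytes makes a GEP at
    // constant Offset 16 dereferenceable for Size = 4 bytes, and the GEP is
    // still 8-byte aligned since 16 is a multiple of 8.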
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }
  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }
  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }
  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull ||
        isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI))) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.
  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that.  For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }
  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);
  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
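    ///
    /// For example, an assume carrying operand bundles such as
    ///   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16),
    ///                                    "dereferenceable"(ptr %p, i64 64)]
    /// can establish a 16-byte aligned access of up to 64 bytes from %p at
    /// points where the assume is known to hold (e.g. blocks it dominates).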
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking
              return false;  // Other assumes may have better information, so
                             // keep looking
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}
bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}
/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);
  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;
  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same.
  // For patterns with gaps (i.e. non unit stride), we are
  // accessing EltSize bytes at every Step.
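  //
  // For example, with TC = 4, Step = 8 and EltSize = 4 the loop reads bytes
  // [0,4), [8,12), [16,20) and [24,28); TC * Step = 32 bytes conservatively
  // covers the exact requirement of (TC - 1) * Step + EltSize = 28 bytes.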
  APInt AccessSize = TC * Step->getAPInt();
  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // The code below assumes the offset is unsigned, but GEP
      // offsets are treated as signed so we can end up with a signed value
      // here too. For example, suppose the initial PHI value is (i8 255),
      // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
      if (Offset->getAPInt().isNegative())
        return false;

      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case found which warrants
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;
  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}
/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, const APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make context-sensitive query
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());
  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        return false;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        return false;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      return false;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }
  return false;
}
bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}
/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}
// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
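  // For example, a 4-byte load at offset 0 covers [0, 4) and a 4-byte store
  // at offset 8 covers [8, 12); the two ranges are disjoint, so the store
  // cannot clobber the loaded bytes.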
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }
  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }
  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;
  }

  return nullptr;
}
Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;
    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}
// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into them.
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}
// Returns true if `To` is a null pointer, constant dereferenceable pointer or
// both pointers have the same underlying objects.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // functionality.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}
bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}
bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}
bool llvm::isDereferenceableReadOnlyLoop(Loop *L, ScalarEvolution *SE,
                                         DominatorTree *DT,
                                         AssumptionCache *AC) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}