//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;
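
/// Check that Base + Offset is aligned to at least Alignment: Base must carry
/// at least that alignment, and Offset must be a multiple of the (possibly
/// stronger) alignment known for Base.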
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  return BA >= Alignment && Offset.isAligned(BA);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;
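
    // For example, a GEP adding a constant 8 bytes with Alignment == 4 passes
    // this check (8 % 4 == 0); dereferenceability is then established on the
    // base pointer for 8 + Size bytes below.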

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
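    // For example, `%p = call ptr @malloc(i64 64)` yields a known object size
    // of 64 bytes, but the result may still be null, so non-nullness is
    // checked separately via isKnownNonZero below.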
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
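    // A typical pattern this matches is an assume with operand bundles:
    //   call void @llvm.assume(i1 true)
    //       ["align"(ptr %p, i64 16), "dereferenceable"(ptr %p, i64 64)]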
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false; // Other assumes may have better information, so
                            // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.
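
  // The explicit depth limit (16) bounds the recursive walk through GEPs,
  // selects, casts, and relocations performed by the static helper above.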
  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are the
  // same.
  // For patterns with gaps (i.e. non unit stride), we are
  // accessing EltSize bytes at every Step.
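  // For example, 4-byte loads (EltSize == 4) taken every 16 bytes (Step == 16)
  // for at most 8 iterations (TC == 8) give AccessSize == 8 * 16 == 128,
  // which covers the final access at bytes 112..115.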
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case is found which warrants it.
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }

  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
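  // For example, a 4-byte load at Base and a 4-byte store at Base+4 produce
  // the ranges [0, 4) and [4, 8), whose intersection is empty.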
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;
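
    // For example, a memset of the byte 0xAB forwarded to an i32 load
    // produces the splat constant 0xABABABAB.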
    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;
  }

  return nullptr;
}

Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

bool llvm::canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                                     Instruction *CtxI) {
  Type *Ty = A->getType();
  assert(Ty == B->getType() && Ty->isPointerTy() &&
         "values must have matching pointer types");

  // NOTE: The checks in the function are incomplete and currently miss illegal
  // cases! The current implementation is a starting point and the
  // implementation should be made stricter over time.
  if (auto *C = dyn_cast<Constant>(B)) {
    // Do not allow replacing a pointer with a constant pointer, unless it is
    // either null or at least one byte is dereferenceable.
    APInt OneByte(DL.getPointerTypeSizeInBits(Ty), 1);
    return C->isNullValue() ||
           isDereferenceableAndAlignedPointer(B, Align(1), OneByte, DL, CtxI);
  }

  return true;
}