//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"

using namespace llvm;
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  Align BA = Base->getPointerAlignment(DL);
  const APInt APAlign(Offset.getBitWidth(), Alignment.value());
  assert(APAlign.isPowerOf2() && "must be a power of 2!");
  return BA >= Alignment && !(Offset & (APAlign - 1));
}
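
// Worked example (illustrative values): if Base has a known alignment of 16,
// Offset is 24, and the requested Alignment is 8, then BA (16) >= Alignment
// and 24 & (8 - 1) == 0, so isAligned returns true; with Offset = 20 the
// low-bits test fails and it returns false.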

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;

  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }
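
  // For example (illustrative IR, assuming 8-byte i64 and Alignment = 8):
  //   %q = getelementptr inbounds i64, ptr %base, i64 2
  // accumulates the constant Offset 16, which is non-negative and a multiple
  // of the alignment, so we recurse asking whether %base is dereferenceable
  // and aligned for 16 + Size bytes.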

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }

  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }

  bool CheckForNonNull, CheckForFreed;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                          CheckForFreed));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
      !CheckForFreed)
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, AC, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
      return isAligned(V, Offset, Alignment, DL);
    }
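
  // For example (illustrative IR): an argument declared as
  //   define void @f(ptr align 8 dereferenceable(16) %p)
  // reports 16 known-dereferenceable bytes for %p, so a load of up to 16
  // bytes at alignment <= 8 succeeds here without further analysis.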

  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.

  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at the point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, DL, 0, AC, CtxI, DT) && !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        APInt Offset(DL.getTypeStoreSizeInBits(V->getType()), 0);
        return isAligned(V, Offset, Alignment, DL);
      }
    }
  }
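
  // For example (illustrative IR): for %p = call noalias ptr @malloc(i64 64),
  // getObjectSize can establish a 64-byte minimum object size, but the result
  // is usable only once %p is also proven non-null (malloc may return null)
  // and the memory provably cannot be freed, for instance when the enclosing
  // function is known not to free memory.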

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);

  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);

  if (CtxI) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume.
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              if (AlignRK && DerefRK && AlignRK.ArgValue >= Alignment.value() &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking.
              return false;  // Other assumes may have better information, so
                             // keep looking.
            }))
      return true;
  }
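
  // For example (illustrative IR): knowledge retained by
  //   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 8),
  //                                    "dereferenceable"(ptr %p, i64 32)]
  // satisfies a query for an access of at most 32 bytes at alignment <= 8,
  // provided the assume is valid at the context instruction CtxI.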

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
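
// Typical use (sketch; the surrounding analyses and names are illustrative):
// a pass asking whether a 4-byte access through Ptr can be speculated at I:
//   APInt Size(DL.getIndexTypeSizeInBits(Ptr->getType()), 4);
//   if (isDereferenceableAndAlignedPointer(Ptr, Align(4), Size, DL, I, &AC,
//                                          &DT, &TLI))
//     ...safe to hoist the load...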

bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.
  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT,
                                             AssumptionCache *AC) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());
  const Align Alignment = LI->getAlign();

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Alignment, EltSize, DL,
                                              HeaderFirstNonPHI, AC, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  auto TC = SE.getSmallConstantMaxTripCount(L);
  if (!TC)
    return false;

  // TODO: Handle overlapping accesses.
  // We should be computing AccessSize as (TC - 1) * Step + EltSize.
  if (EltSize.sgt(Step->getAPInt()))
    return false;

  // Compute the total access size for access patterns with unit stride and
  // patterns with gaps. For patterns with unit stride, Step and EltSize are
  // the same.
  // For patterns with gaps (i.e. non-unit stride), we are
  // accessing EltSize bytes at every Step.
  APInt AccessSize = TC * Step->getAPInt();

  assert(SE.isLoopInvariant(AddRec->getStart(), L) &&
         "implied by addrec definition");
  Value *Base = nullptr;
  if (auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart())) {
    Base = StartS->getValue();
  } else if (auto *StartS = dyn_cast<SCEVAddExpr>(AddRec->getStart())) {
    // Handle (NewBase + offset) as start value.
    const auto *Offset = dyn_cast<SCEVConstant>(StartS->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(StartS->getOperand(1));
    if (StartS->getNumOperands() == 2 && Offset && NewBase) {
      // For the moment, restrict ourselves to the case where the offset is a
      // multiple of the requested alignment and the base is aligned.
      // TODO: generalize if a case is found which warrants it.
      if (Offset->getAPInt().urem(Alignment.value()) != 0)
        return false;
      Base = NewBase->getValue();
      bool Overflow = false;
      AccessSize = AccessSize.uadd_ov(Offset->getAPInt(), Overflow);
      if (Overflow)
        return false;
    }
  }

  if (!Base)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}
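
// Worked example (illustrative values): an i32 load (EltSize = 4) advancing
// by Step = 4 bytes per iteration in a loop with a maximum trip count of 100
// yields AccessSize = 100 * 4 = 400, so the access is safe on every iteration
// if the base is dereferenceable and aligned for 400 bytes at the loop header.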

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}
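
// For example (illustrative IR): in
//   %x = load i32, ptr %p, align 4
//   ...
//   %y = load i32, ptr %p, align 4   ; the query point
// the earlier load would already have trapped if %p were invalid, so loading
// 4 bytes from %p again at alignment <= 4 is safe to speculate.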

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}

// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
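
// Worked example (illustrative values): a 4-byte load at base offset 0 and a
// 4-byte store at base offset 8 produce the ranges [0, 4) and [8, 12); their
// intersection is empty, so the store cannot clobber the loaded bytes.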

static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }

  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }

  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;
  }

  return nullptr;
}
*llvm::findAvailablePtrLoadStore(
586 const MemoryLocation
&Loc
, Type
*AccessTy
, bool AtLeastAtomic
,
587 BasicBlock
*ScanBB
, BasicBlock::iterator
&ScanFrom
, unsigned MaxInstsToScan
,
588 AAResults
*AA
, bool *IsLoadCSE
, unsigned *NumScanedInst
) {
589 if (MaxInstsToScan
== 0)
590 MaxInstsToScan
= ~0U;
592 const DataLayout
&DL
= ScanBB
->getModule()->getDataLayout();
593 const Value
*StrippedPtr
= Loc
.Ptr
->stripPointerCasts();
595 while (ScanFrom
!= ScanBB
->begin()) {
596 // We must ignore debug info directives when counting (otherwise they
597 // would affect codegen).
598 Instruction
*Inst
= &*--ScanFrom
;
599 if (Inst
->isDebugOrPseudoInst())
602 // Restore ScanFrom to expected value in case next test succeeds
608 // Don't scan huge blocks.
609 if (MaxInstsToScan
-- == 0)
614 if (Value
*Available
= getAvailableLoadStore(Inst
, StrippedPtr
, AccessTy
,
615 AtLeastAtomic
, DL
, IsLoadCSE
))
618 // Try to get the store size for the type.
619 if (StoreInst
*SI
= dyn_cast
<StoreInst
>(Inst
)) {
620 Value
*StorePtr
= SI
->getPointerOperand()->stripPointerCasts();
622 // If both StrippedPtr and StorePtr reach all the way to an alloca or
623 // global and they are different, ignore the store. This is a trivial form
624 // of alias analysis that is important for reg2mem'd code.
625 if ((isa
<AllocaInst
>(StrippedPtr
) || isa
<GlobalVariable
>(StrippedPtr
)) &&
626 (isa
<AllocaInst
>(StorePtr
) || isa
<GlobalVariable
>(StorePtr
)) &&
627 StrippedPtr
!= StorePtr
)
631 // When AA isn't available, but if the load and the store have the same
632 // base, constant offsets and non-overlapping access ranges, ignore the
633 // store. This is a simple form of alias analysis that is used by the
634 // inliner. FIXME: use BasicAA if possible.
635 if (areNonOverlapSameBaseLoadAndStore(
636 Loc
.Ptr
, AccessTy
, SI
->getPointerOperand(),
637 SI
->getValueOperand()->getType(), DL
))
640 // If we have alias analysis and it says the store won't modify the
641 // loaded value, ignore the store.
642 if (!isModSet(AA
->getModRefInfo(SI
, Loc
)))
646 // Otherwise the store that may or may not alias the pointer, bail out.
651 // If this is some other instruction that may clobber Ptr, bail out.
652 if (Inst
->mayWriteToMemory()) {
653 // If alias analysis claims that it really won't modify the load,
655 if (AA
&& !isModSet(AA
->getModRefInfo(Inst
, Loc
)))
658 // May modify the pointer, bail out.
664 // Got to the start of the block, we didn't find it, but are done for this
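
// For example (illustrative IR): scanning backwards from the load in
//   store i32 42, ptr %p
//   %v = load i32, ptr %p
// finds the store through an equivalent pointer and returns the stored value,
// so the caller can reuse 42 instead of re-reading memory.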

Value *llvm::FindAvailableLoadedValue(LoadInst *Load, AAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getModule()->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}

bool llvm::canReplacePointersIfEqual(Value *A, Value *B, const DataLayout &DL,
                                     Instruction *CtxI) {
  Type *Ty = A->getType();
  assert(Ty == B->getType() && Ty->isPointerTy() &&
         "values must have matching pointer types");

  // NOTE: The checks in the function are incomplete and currently miss illegal
  // cases! The current implementation is a starting point and the
  // implementation should be made stricter over time.
  if (auto *C = dyn_cast<Constant>(B)) {
    // Do not allow replacing a pointer with a constant pointer, unless it is
    // either null or at least one byte is dereferenceable.
    APInt OneByte(DL.getPointerTypeSizeInBits(Ty), 1);
    return C->isNullValue() ||
           isDereferenceableAndAlignedPointer(B, Align(1), OneByte, DL, CtxI);
  }

  return true;
}