//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumeBundleQueries.h"
#include "llvm/Analysis/LoopAccessAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
using namespace llvm;

extern cl::opt<bool> UseDerefAtPointSemantics;
static bool isAligned(const Value *Base, Align Alignment,
                      const DataLayout &DL) {
  return Base->getPointerAlignment(DL) >= Alignment;
}
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI, SmallPtrSetImpl<const Value *> &Visited,
    unsigned MaxDepth) {
  assert(V->getType()->isPointerTy() && "Base must be pointer");

  // Recursion limit.
  if (MaxDepth-- == 0)
    return false;
  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.
  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Alignment.value()))
             .isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Alignment, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL,
        CtxI, AC, DT, TLI, Visited, MaxDepth);
  }
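
  // For example, for "%p = getelementptr inbounds i8, ptr %base, i64 4" an
  // access of 4 bytes through %p with Align(4) is safe when %base is known to
  // be dereferenceable for at least Offset + Size = 8 bytes and aligned to 4
  // bytes, since the offset 4 is itself a multiple of the alignment.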
  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V)) {
    if (BC->getSrcTy()->isPointerTy())
      return isDereferenceableAndAlignedPointer(
          BC->getOperand(0), Alignment, Size, DL, CtxI, AC, DT, TLI,
          Visited, MaxDepth);
  }
  // Recurse into both hands of select.
  if (const SelectInst *Sel = dyn_cast<SelectInst>(V)) {
    return isDereferenceableAndAlignedPointer(Sel->getTrueValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth) &&
           isDereferenceableAndAlignedPointer(Sel->getFalseValue(), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  }
  auto IsKnownDeref = [&]() {
    bool CheckForNonNull, CheckForFreed;
    if (!Size.ule(V->getPointerDereferenceableBytes(DL, CheckForNonNull,
                                                    CheckForFreed)) ||
        CheckForFreed)
      return false;
    if (CheckForNonNull &&
        !isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)))
      return false;
    // When using something like !dereferenceable on a load, the
    // dereferenceability may only be valid on a specific control-flow path.
    // If the instruction doesn't dominate the context instruction, we're
    // asking about dereferenceability under the assumption that the
    // instruction has been speculated to the point of the context instruction,
    // in which case we don't know if the dereferenceability info still holds.
    // We don't bother handling allocas here, as they aren't speculatable
    // anyway.
    auto *I = dyn_cast<Instruction>(V);
    if (I && !isa<AllocaInst>(I))
      return CtxI && isValidAssumeForContext(I, CtxI, DT);
    return true;
  };
  if (IsKnownDeref()) {
    // As we recursed through GEPs to get here, we've incrementally checked
    // that each step advanced by a multiple of the alignment. If our base is
    // properly aligned, then the original offset accessed must also be.
    return isAligned(V, Alignment, DL);
  }
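
  // Illustrative IR: dereferenceability implied by load metadata such as
  //   %q = load ptr, ptr %pp, !dereferenceable !0   ; !0 = !{i64 8}
  // only holds on paths where that load executes, which is why the lambda
  // above additionally requires the defining instruction to be a valid
  // assume context for CtxI.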
  /// TODO refactor this function to be able to search independently for
  /// Dereferenceability and Alignment requirements.
  if (const auto *Call = dyn_cast<CallBase>(V)) {
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Alignment, Size, DL, CtxI,
                                                AC, DT, TLI, Visited, MaxDepth);

    // If we have a call we can't recurse through, check to see if this is an
    // allocation function for which we can establish a minimum object size.
    // Such a minimum object size is analogous to a deref_or_null attribute in
    // that we still need to prove the result non-null at point of use.
    // NOTE: We can only use the object size as a base fact as we a) need to
    // prove alignment too, and b) don't want the compile time impact of a
    // separate recursive walk.
    ObjectSizeOpts Opts;
    // TODO: It may be okay to round to align, but that would imply that
    // accessing slightly out of bounds was legal, and we're currently
    // inconsistent about that. For the moment, be conservative.
    Opts.RoundToAlign = false;
    Opts.NullIsUnknownSize = true;
    uint64_t ObjSize;
    if (getObjectSize(V, ObjSize, DL, TLI, Opts)) {
      APInt KnownDerefBytes(Size.getBitWidth(), ObjSize);
      if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size) &&
          isKnownNonZero(V, SimplifyQuery(DL, DT, AC, CtxI)) &&
          !V->canBeFreed()) {
        // As we recursed through GEPs to get here, we've incrementally
        // checked that each step advanced by a multiple of the alignment. If
        // our base is properly aligned, then the original offset accessed
        // must also be.
        return isAligned(V, Alignment, DL);
      }
    }
  }
  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Alignment, Size, DL, CtxI, AC, DT,
                                              TLI, Visited, MaxDepth);
  if (const AddrSpaceCastOperator *ASC = dyn_cast<AddrSpaceCastOperator>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Alignment,
                                              Size, DL, CtxI, AC, DT, TLI,
                                              Visited, MaxDepth);
  if (CtxI && (!UseDerefAtPointSemantics || !V->canBeFreed())) {
    /// Look through assumes to see if both dereferenceability and alignment
    /// can be proven by an assume if needed.
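    // For example, both facts can be conveyed by operand bundles on an
    // assume, e.g.:
    //   call void @llvm.assume(i1 true) ["align"(ptr %p, i64 16),
    //                                    "dereferenceable"(ptr %p, i64 64)]
    // If that assume is a valid context for CtxI, an access through %p of up
    // to 64 bytes at an alignment of up to 16 can be proven safe here.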
    RetainedKnowledge AlignRK;
    RetainedKnowledge DerefRK;
    bool IsAligned = V->getPointerAlignment(DL) >= Alignment;
    if (getKnowledgeForValue(
            V, {Attribute::Dereferenceable, Attribute::Alignment}, AC,
            [&](RetainedKnowledge RK, Instruction *Assume, auto) {
              if (!isValidAssumeForContext(Assume, CtxI, DT))
                return false;
              if (RK.AttrKind == Attribute::Alignment)
                AlignRK = std::max(AlignRK, RK);
              if (RK.AttrKind == Attribute::Dereferenceable)
                DerefRK = std::max(DerefRK, RK);
              IsAligned |= AlignRK && AlignRK.ArgValue >= Alignment.value();
              if (IsAligned && DerefRK &&
                  DerefRK.ArgValue >= Size.getZExtValue())
                return true; // We have found what we needed so we stop looking
              return false; // Other assumes may have better information, so
                            // keep looking.
            }))
      return true;
  }

  // If we don't know, assume the worst.
  return false;
}
bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Align Alignment, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC,
                                              DT, TLI, Visited, 16);
}
bool llvm::isDereferenceableAndAlignedPointer(
    const Value *V, Type *Ty, Align Alignment, const DataLayout &DL,
    const Instruction *CtxI, AssumptionCache *AC, const DominatorTree *DT,
    const TargetLibraryInfo *TLI) {
  // For unsized types or scalable vectors we don't know exactly how many bytes
  // are dereferenced, so bail out.
  if (!Ty->isSized() || Ty->isScalableTy())
    return false;

  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  APInt AccessSize(DL.getPointerTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Alignment, AccessSize, DL, CtxI,
                                            AC, DT, TLI);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    AssumptionCache *AC,
                                    const DominatorTree *DT,
                                    const TargetLibraryInfo *TLI) {
  return isDereferenceableAndAlignedPointer(V, Ty, Align(1), DL, CtxI, AC, DT,
                                            TLI);
}
/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
bool llvm::isDereferenceableAndAlignedInLoop(
    LoadInst *LI, Loop *L, ScalarEvolution &SE, DominatorTree &DT,
    AssumptionCache *AC, SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  const Align Alignment = LI->getAlign();
  auto &DL = LI->getDataLayout();
  Value *Ptr = LI->getPointerOperand();
  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()).getFixedValue());

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(
        Ptr, Alignment, EltSize, DL, &*L->getHeader()->getFirstNonPHIIt(), AC,
        &DT);

  const SCEV *PtrScev = SE.getSCEV(Ptr);
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(PtrScev);

  // Check to see if we have a repeating access pattern and it's possible
  // to prove all accesses are well aligned.
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;

  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case is found which warrants it.
  if (EltSize.urem(Alignment.value()) != 0)
    return false;

  // TODO: Handle overlapping accesses.
  if (EltSize.ugt(Step->getAPInt().abs()))
    return false;

  const SCEV *MaxBECount =
      Predicates ? SE.getPredicatedConstantMaxBackedgeTakenCount(L, *Predicates)
                 : SE.getConstantMaxBackedgeTakenCount(L);
  if (isa<SCEVCouldNotCompute>(MaxBECount))
    return false;
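
  // As a rough worked example: for an align-4 load of i32 (EltSize 4) that
  // strides by 4 bytes per iteration in a loop whose maximum backedge-taken
  // count is 99, the accesses cover 100 * 4 = 400 contiguous bytes starting
  // at the first address, so it is enough to prove that range dereferenceable
  // and the base suitably aligned at the loop header.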
  const auto &[AccessStart, AccessEnd] = getStartAndEndForAccess(
      L, PtrScev, LI->getType(), MaxBECount, &SE, nullptr);
  if (isa<SCEVCouldNotCompute>(AccessStart) ||
      isa<SCEVCouldNotCompute>(AccessEnd))
    return false;

  // Try to get the access size.
  const SCEV *PtrDiff = SE.getMinusSCEV(AccessEnd, AccessStart);
  APInt MaxPtrDiff = SE.getUnsignedRangeMax(PtrDiff);

  Value *Base = nullptr;
  APInt AccessSize;
  if (const SCEVUnknown *NewBase = dyn_cast<SCEVUnknown>(AccessStart)) {
    Base = NewBase->getValue();
    AccessSize = MaxPtrDiff;
  } else if (auto *MinAdd = dyn_cast<SCEVAddExpr>(AccessStart)) {
    if (MinAdd->getNumOperands() != 2)
      return false;

    const auto *Offset = dyn_cast<SCEVConstant>(MinAdd->getOperand(0));
    const auto *NewBase = dyn_cast<SCEVUnknown>(MinAdd->getOperand(1));
    if (!Offset || !NewBase)
      return false;

    // The code below assumes the offset is unsigned, but GEP
    // offsets are treated as signed so we can end up with a signed value
    // here too. For example, suppose the initial PHI value is (i8 255),
    // the offset will be treated as (i8 -1) and sign-extended to (i64 -1).
    if (Offset->getAPInt().isNegative())
      return false;

    // For the moment, restrict ourselves to the case where the offset is a
    // multiple of the requested alignment and the base is aligned.
    // TODO: generalize if a case is found which warrants it.
    if (Offset->getAPInt().urem(Alignment.value()) != 0)
      return false;

    AccessSize = MaxPtrDiff + Offset->getAPInt();
    Base = NewBase->getValue();
  } else
    return false;

  Instruction *HeaderFirstNonPHI = &*L->getHeader()->getFirstNonPHIIt();
  return isDereferenceableAndAlignedPointer(Base, Alignment, AccessSize, DL,
                                            HeaderFirstNonPHI, AC, &DT);
}
static bool suppressSpeculativeLoadForSanitizers(const Instruction &CtxI) {
  const Function &F = *CtxI.getFunction();
  // Speculative load may create a race that did not exist in the source.
  return F.hasFnAttribute(Attribute::SanitizeThread) ||
         // Speculative load may load data from dirty regions.
         F.hasFnAttribute(Attribute::SanitizeAddress) ||
         F.hasFnAttribute(Attribute::SanitizeHWAddress);
}

bool llvm::mustSuppressSpeculation(const LoadInst &LI) {
  return !LI.isUnordered() || suppressSpeculativeLoadForSanitizers(LI);
}
/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, Align Alignment,
                                       const APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Alignment, Size, DL, CtxI, AC, DT,
                                         TLI)) {
    // With sanitizers `Dereferenceable` is not always enough for unconditional
    // load.
    if (!ScanFrom || !suppressSpeculativeLoadForSanitizers(*ScanFrom))
      return true;
  }

  if (!ScanFrom)
    return false;
  if (Size.getBitWidth() > 64)
    return false;
  const TypeSize LoadSize = TypeSize::getFixed(Size.getZExtValue());

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();
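
  // For instance, if this block already contains "store i32 0, ptr %p, align 4"
  // before ScanFrom, then an i32 load from %p at an alignment of at most 4
  // cannot introduce a new trap: the earlier store would have trapped first.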
  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<LifetimeIntrinsic>(BBI) && !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    Type *AccessedTy;
    Align AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedTy = LI->getType();
      AccessedAlign = LI->getAlign();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedTy = SI->getValueOperand()->getType();
      AccessedAlign = SI->getAlign();
    } else
      continue;

    if (AccessedAlign < Alignment)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        TypeSize::isKnownLE(LoadSize, DL.getTypeStoreSize(AccessedTy)))
      return true;
  }

  return false;
}
bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, Align Alignment,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       AssumptionCache *AC,
                                       const DominatorTree *DT,
                                       const TargetLibraryInfo *TLI) {
  TypeSize TySize = DL.getTypeStoreSize(Ty);
  if (TySize.isScalable())
    return false;
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), TySize.getFixedValue());
  return isSafeToLoadUnconditionally(V, Alignment, Size, DL, ScanFrom, AC, DT,
                                     TLI);
}
/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      BatchAAResults *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  MemoryLocation Loc = MemoryLocation::get(Load);
  return findAvailablePtrLoadStore(Loc, Load->getType(), Load->isAtomic(),
                                   ScanBB, ScanFrom, MaxInstsToScan, AA, IsLoad,
                                   NumScanedInst);
}
// Check if the load and the store have the same base, constant offsets and
// non-overlapping access ranges.
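// For example, an i32 load from (gep i8, %base, 4) and an i32 store to %base
// share the base %base with offsets 4 and 0; the access ranges [4, 8) and
// [0, 4) do not intersect, so the store cannot clobber the loaded bytes.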
static bool areNonOverlapSameBaseLoadAndStore(const Value *LoadPtr,
                                              Type *LoadTy,
                                              const Value *StorePtr,
                                              Type *StoreTy,
                                              const DataLayout &DL) {
  APInt LoadOffset(DL.getIndexTypeSizeInBits(LoadPtr->getType()), 0);
  APInt StoreOffset(DL.getIndexTypeSizeInBits(StorePtr->getType()), 0);
  const Value *LoadBase = LoadPtr->stripAndAccumulateConstantOffsets(
      DL, LoadOffset, /* AllowNonInbounds */ false);
  const Value *StoreBase = StorePtr->stripAndAccumulateConstantOffsets(
      DL, StoreOffset, /* AllowNonInbounds */ false);
  if (LoadBase != StoreBase)
    return false;
  auto LoadAccessSize = LocationSize::precise(DL.getTypeStoreSize(LoadTy));
  auto StoreAccessSize = LocationSize::precise(DL.getTypeStoreSize(StoreTy));
  ConstantRange LoadRange(LoadOffset,
                          LoadOffset + LoadAccessSize.toRaw());
  ConstantRange StoreRange(StoreOffset,
                           StoreOffset + StoreAccessSize.toRaw());
  return LoadRange.intersectWith(StoreRange).isEmptySet();
}
static Value *getAvailableLoadStore(Instruction *Inst, const Value *Ptr,
                                    Type *AccessTy, bool AtLeastAtomic,
                                    const DataLayout &DL, bool *IsLoadCSE) {
  // If this is a load of Ptr, the loaded value is available.
  // (This is true even if the load is volatile or atomic, although
  // those cases are unlikely.)
  if (LoadInst *LI = dyn_cast<LoadInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (LI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *LoadPtr = LI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(LoadPtr, Ptr))
      return nullptr;

    if (CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {
      if (IsLoadCSE)
        *IsLoadCSE = true;
      return LI;
    }
  }
  // If this is a store through Ptr, the value is available!
  // (This is true even if the store is volatile or atomic, although
  // those cases are unlikely.)
  if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
    // We can value forward from an atomic to a non-atomic, but not the
    // other way around.
    if (SI->isAtomic() < AtLeastAtomic)
      return nullptr;

    Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
    if (!AreEquivalentAddressValues(StorePtr, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    Value *Val = SI->getValueOperand();
    if (CastInst::isBitOrNoopPointerCastable(Val->getType(), AccessTy, DL))
      return Val;

    TypeSize StoreSize = DL.getTypeSizeInBits(Val->getType());
    TypeSize LoadSize = DL.getTypeSizeInBits(AccessTy);
    if (TypeSize::isKnownLE(LoadSize, StoreSize))
      if (auto *C = dyn_cast<Constant>(Val))
        return ConstantFoldLoadFromConst(C, AccessTy, DL);
  }
  if (auto *MSI = dyn_cast<MemSetInst>(Inst)) {
    // Don't forward from (non-atomic) memset to atomic load.
    if (AtLeastAtomic)
      return nullptr;

    // Only handle constant memsets.
    auto *Val = dyn_cast<ConstantInt>(MSI->getValue());
    auto *Len = dyn_cast<ConstantInt>(MSI->getLength());
    if (!Val || !Len)
      return nullptr;

    // TODO: Handle offsets.
    Value *Dst = MSI->getDest();
    if (!AreEquivalentAddressValues(Dst, Ptr))
      return nullptr;

    if (IsLoadCSE)
      *IsLoadCSE = false;

    TypeSize LoadTypeSize = DL.getTypeSizeInBits(AccessTy);
    if (LoadTypeSize.isScalable())
      return nullptr;

    // Make sure the read bytes are contained in the memset.
    uint64_t LoadSize = LoadTypeSize.getFixedValue();
    if ((Len->getValue() * 8).ult(LoadSize))
      return nullptr;

    APInt Splat = LoadSize >= 8 ? APInt::getSplat(LoadSize, Val->getValue())
                                : Val->getValue().trunc(LoadSize);
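    // For example, forwarding from "memset(%p, 0xAB, 8)" to an i32 load of %p
    // splats the byte to the 32-bit constant 0xABABABAB; the length check
    // above guarantees the 4 loaded bytes lie within the 8 memset bytes.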
    ConstantInt *SplatC = ConstantInt::get(MSI->getContext(), Splat);
    if (CastInst::isBitOrNoopPointerCastable(SplatC->getType(), AccessTy, DL))
      return SplatC;
  }

  return nullptr;
}
Value *llvm::findAvailablePtrLoadStore(
    const MemoryLocation &Loc, Type *AccessTy, bool AtLeastAtomic,
    BasicBlock *ScanBB, BasicBlock::iterator &ScanFrom, unsigned MaxInstsToScan,
    BatchAAResults *AA, bool *IsLoadCSE, unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getDataLayout();
  const Value *StrippedPtr = Loc.Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (Inst->isDebugOrPseudoInst())
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++*NumScanedInst;

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;

    if (Value *Available = getAvailableLoadStore(Inst, StrippedPtr, AccessTy,
                                                 AtLeastAtomic, DL, IsLoadCSE))
      return Available;

    // Try to get the store size for the type.
    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      if (!AA) {
        // When AA isn't available, but if the load and the store have the same
        // base, constant offsets and non-overlapping access ranges, ignore the
        // store. This is a simple form of alias analysis that is used by the
        // inliner. FIXME: use BasicAA if possible.
        if (areNonOverlapSameBaseLoadAndStore(
                Loc.Ptr, AccessTy, SI->getPointerOperand(),
                SI->getValueOperand()->getType(), DL))
          continue;
      } else {
        // If we have alias analysis and it says the store won't modify the
        // loaded value, ignore the store.
        if (!isModSet(AA->getModRefInfo(SI, Loc)))
          continue;
      }

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }
    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, Loc)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}
Value *llvm::FindAvailableLoadedValue(LoadInst *Load, BatchAAResults &AA,
                                      bool *IsLoadCSE,
                                      unsigned MaxInstsToScan) {
  const DataLayout &DL = Load->getDataLayout();
  Value *StrippedPtr = Load->getPointerOperand()->stripPointerCasts();
  BasicBlock *ScanBB = Load->getParent();
  Type *AccessTy = Load->getType();
  bool AtLeastAtomic = Load->isAtomic();

  if (!Load->isUnordered())
    return nullptr;

  // Try to find an available value first, and delay expensive alias analysis
  // queries until later.
  Value *Available = nullptr;
  SmallVector<Instruction *> MustNotAliasInsts;
  for (Instruction &Inst : make_range(++Load->getReverseIterator(),
                                      ScanBB->rend())) {
    if (Inst.isDebugOrPseudoInst())
      continue;

    if (MaxInstsToScan-- == 0)
      return nullptr;

    Available = getAvailableLoadStore(&Inst, StrippedPtr, AccessTy,
                                      AtLeastAtomic, DL, IsLoadCSE);
    if (Available)
      break;

    if (Inst.mayWriteToMemory())
      MustNotAliasInsts.push_back(&Inst);
  }

  // If we found an available value, ensure that the instructions in between
  // did not modify the memory location.
  if (Available) {
    MemoryLocation Loc = MemoryLocation::get(Load);
    for (Instruction *Inst : MustNotAliasInsts)
      if (isModSet(AA.getModRefInfo(Inst, Loc)))
        return nullptr;
  }

  return Available;
}
// Returns true if a use is either in an ICmp/PtrToInt or a Phi/Select that
// only feeds into such users (transitively).
static bool isPointerUseReplacable(const Use &U) {
  unsigned Limit = 40;
  SmallVector<const User *> Worklist({U.getUser()});
  SmallPtrSet<const User *, 8> Visited;

  while (!Worklist.empty() && --Limit) {
    auto *User = Worklist.pop_back_val();
    if (!Visited.insert(User).second)
      continue;
    if (isa<ICmpInst, PtrToIntInst>(User))
      continue;
    if (isa<PHINode, SelectInst>(User))
      Worklist.append(User->user_begin(), User->user_end());
    else
      return false;
  }

  return Limit != 0;
}
// Returns true if `To` is a null pointer, a constant dereferenceable pointer,
// or both pointers have the same underlying object.
static bool isPointerAlwaysReplaceable(const Value *From, const Value *To,
                                       const DataLayout &DL) {
  // This is not strictly correct, but we do it for now to retain important
  // optimizations.
  if (isa<ConstantPointerNull>(To))
    return true;
  if (isa<Constant>(To) &&
      isDereferenceablePointer(To, Type::getInt8Ty(To->getContext()), DL))
    return true;
  return getUnderlyingObjectAggressive(From) ==
         getUnderlyingObjectAggressive(To);
}
bool llvm::canReplacePointersInUseIfEqual(const Use &U, const Value *To,
                                          const DataLayout &DL) {
  assert(U->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!To->getType()->isPointerTy())
    return true;

  if (isPointerAlwaysReplaceable(&*U, To, DL))
    return true;
  return isPointerUseReplacable(U);
}
bool llvm::canReplacePointersIfEqual(const Value *From, const Value *To,
                                     const DataLayout &DL) {
  assert(From->getType() == To->getType() && "values must have matching types");
  // Not a pointer, just return true.
  if (!From->getType()->isPointerTy())
    return true;

  return isPointerAlwaysReplaceable(From, To, DL);
}
bool llvm::isDereferenceableReadOnlyLoop(
    Loop *L, ScalarEvolution *SE, DominatorTree *DT, AssumptionCache *AC,
    SmallVectorImpl<const SCEVPredicate *> *Predicates) {
  for (BasicBlock *BB : L->blocks()) {
    for (Instruction &I : *BB) {
      if (auto *LI = dyn_cast<LoadInst>(&I)) {
        if (!isDereferenceableAndAlignedInLoop(LI, L, *SE, *DT, AC, Predicates))
          return false;
      } else if (I.mayReadFromMemory() || I.mayWriteToMemory() || I.mayThrow())
        return false;
    }
  }
  return true;
}