//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;
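/// Return the best known alignment of the given base pointer: its explicit
/// pointer alignment if known, otherwise the ABI alignment of the pointee
/// type (when that type is sized).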
static MaybeAlign getBaseAlign(const Value *Base, const DataLayout &DL) {
  if (const MaybeAlign PA = Base->getPointerAlignment(DL))
    return *PA;
  Type *const Ty = Base->getType()->getPointerElementType();
  if (!Ty->isSized())
    return None;
  return Align(DL.getABITypeAlignment(Ty));
}
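
/// Return true if Base + Offset is known to be aligned to at least Alignment
/// bytes, based on the alignment that can be established for Base itself.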
static bool isAligned(const Value *Base, const APInt &Offset, Align Alignment,
                      const DataLayout &DL) {
  if (MaybeAlign BA = getBaseAlign(Base, DL)) {
    const APInt APBaseAlign(Offset.getBitWidth(), BA->value());
    const APInt APAlign(Offset.getBitWidth(), Alignment.value());
    assert(APAlign.isPowerOf2() && "must be a power of 2!");
    return APBaseAlign.uge(APAlign) && !(Offset & (APAlign - 1));
  }
  return false;
}
/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);
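
  // If the pointer is known to be dereferenceable for at least Size bytes
  // (e.g. from a dereferenceable attribute, an alloca, or a global), the
  // access cannot trap as long as any required non-null check also holds.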
  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, llvm::Align(Align), DL);
    }
  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()), DL, CtxI,
        DT, Visited);
  }
  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(RelocateInst->getDerivedPtr(),
                                              Align, Size, DL, CtxI, DT,
                                              Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);
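
  // A call that is known to return one of its pointer arguments is
  // dereferenceable exactly when that argument is.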
  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}
bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize, DL, CtxI,
                                            DT);
}
bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}
/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}
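
/// Return true if we can prove that the load \p LI, which executes on every
/// iteration of \p L, only accesses dereferenceable and sufficiently aligned
/// memory for the entire loop.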
bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  unsigned Align = LI->getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(LI->getType());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;
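
  // With a constant trip count and a step equal to the element size, the loop
  // touches exactly TC * EltSize contiguous bytes starting at the base.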
  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Align) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize, DL,
                                            HeaderFirstNonPHI, &DT);
}
/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make context-sensitive query
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V && LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}
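
/// Overload that derives the access size from the loaded type \p Ty and
/// forwards to the APInt-based variant above.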
bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}
/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));
Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}
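
/// Scan backwards from ScanFrom in ScanBB for a load of, or a store to, an
/// address equivalent to Ptr, returning the available value if one is found
/// within MaxInstsToScan instructions.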
Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++*NumScanedInst;

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the
      // loaded value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}