//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited?  Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Align, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes.  If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
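    //
    // For instance (illustrative IR, not taken from this file): with %base
    // known dereferenceable(16) and a 4-byte load of
    //   %gep = getelementptr inbounds i32, i32* %base, i64 2
    // we get Offset = 8 and Size = 4, so the base covers Offset + Size = 12
    // <= 16 bytes; and with a requested Align of 4, Offset is a multiple of
    // it, so the recursive query on %base settles both questions.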
    //
    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero.  This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do).  It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without alignment specification
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
                                            DL, CtxI, DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

bool llvm::isDereferenceableAndAlignedInLoop(LoadInst *LI, Loop *L,
                                             ScalarEvolution &SE,
                                             DominatorTree &DT) {
  auto &DL = LI->getModule()->getDataLayout();
  Value *Ptr = LI->getPointerOperand();

  APInt EltSize(DL.getIndexTypeSizeInBits(Ptr->getType()),
                DL.getTypeStoreSize(LI->getType()));
  unsigned Align = LI->getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(LI->getType());

  Instruction *HeaderFirstNonPHI = L->getHeader()->getFirstNonPHI();

  // If given a uniform (i.e. non-varying) address, see if we can prove the
  // access is safe within the loop w/o needing predication.
  if (L->isLoopInvariant(Ptr))
    return isDereferenceableAndAlignedPointer(Ptr, Align, EltSize, DL,
                                              HeaderFirstNonPHI, &DT);

  // Otherwise, check to see if we have a repeating access pattern where we can
  // prove that all accesses are well aligned and dereferenceable.
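  //
  // For instance (illustrative), for a loop like
  //   for (i = 0; i != n; ++i) sum += a[i];   // 'a' is an i32 array
  // SCEV models the address as {%a,+,4}<%loop>, so Step == EltSize == 4; with
  // a constant trip count TC the loop touches TC * 4 bytes starting at %a,
  // and it suffices to show that range is dereferenceable and %a is aligned.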
  auto *AddRec = dyn_cast<SCEVAddRecExpr>(SE.getSCEV(Ptr));
  if (!AddRec || AddRec->getLoop() != L || !AddRec->isAffine())
    return false;
  auto *Step = dyn_cast<SCEVConstant>(AddRec->getStepRecurrence(SE));
  if (!Step)
    return false;
  // TODO: generalize to access patterns which have gaps
  if (Step->getAPInt() != EltSize)
    return false;

  // TODO: If the symbolic trip count has a small bound (max count), we might
  // be able to prove safety.
  auto TC = SE.getSmallConstantTripCount(L);
  if (!TC)
    return false;

  const APInt AccessSize = TC * EltSize;

  auto *StartS = dyn_cast<SCEVUnknown>(AddRec->getStart());
  if (!StartS)
    return false;
  assert(SE.isLoopInvariant(StartS, L) && "implied by addrec definition");
  Value *Base = StartS->getValue();

  // For the moment, restrict ourselves to the case where the access size is a
  // multiple of the requested alignment and the base is aligned.
  // TODO: generalize if a case found which warrants
  if (EltSize.urem(Align) != 0)
    return false;
  return isDereferenceableAndAlignedPointer(Base, Align, AccessSize,
                                            DL, HeaderFirstNonPHI, &DT);
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
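///
/// For example (illustrative), a transform that wants to speculate the load in
///   if (c)
///     v = *p;
/// ahead of the branch can use this query to check that the unconditional
/// load of p cannot trap.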
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make context-sensitive query
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to.  If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }

  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
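/// Being a cl::opt, the default can be overridden on the command line, e.g.
/// (illustrative) -available-load-scan-limit=32 when running opt, or via
/// -mllvm when invoking clang.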
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}

Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
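      //
      // For example (illustrative): scanning backwards from a load of %p,
      //   store i32 %x, i32* %p
      //   %v = load i32, i32* %p
      // makes %x available, and the caller can replace %v with %x.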
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial
      // form of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the
      // loaded value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this