//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//
13 #include "llvm/Analysis/Loads.h"
14 #include "llvm/Analysis/AliasAnalysis.h"
15 #include "llvm/Analysis/ValueTracking.h"
16 #include "llvm/IR/DataLayout.h"
17 #include "llvm/IR/GlobalAlias.h"
18 #include "llvm/IR/GlobalVariable.h"
19 #include "llvm/IR/IntrinsicInst.h"
20 #include "llvm/IR/LLVMContext.h"
21 #include "llvm/IR/Module.h"
22 #include "llvm/IR/Operator.h"
23 #include "llvm/IR/Statepoint.h"

static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  // If the base has no known alignment, fall back to the ABI alignment of its
  // pointee type.
  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
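  // Because Alignment is a power of two, "Offset & (Alignment - 1)" computes
  // Offset modulo Alignment; e.g. for Align == 8, an Offset of 20 gives
  // 20 & 7 == 4, so the check below rejects the access as misaligned.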
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);
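
  // For example, an argument declared "i32* dereferenceable(8) %p" makes
  // getPointerDereferenceableBytes() below return 8 for %p, so suitably
  // aligned accesses of at most eight bytes through %p pass this fast path
  // without any further recursion.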

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue() && KnownDerefBytes.uge(Size))
    if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT)) {
      // As we recursed through GEPs to get here, we've incrementally checked
      // that each step advanced by a multiple of the alignment. If our base is
      // properly aligned, then the original offset accessed must also be.
      Type *Ty = V->getType();
      assert(Ty->isSized() && "must be sized");
      APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
      return isAligned(V, Offset, Align, DL);
    }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align,
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
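    //
    // For example, with Align == 8: if Base is 8-byte aligned and known
    // dereferenceable for 24 bytes, a GEP at constant Offset 16 with Size 8
    // stays in bounds (16 + 8 <= 24), and 16 % 8 == 0 keeps the result
    // 8-byte aligned.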

    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call, true))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  assert(Align != 0 && "expected explicitly set alignment");
  // Note: At the moment, Size can be zero. This ends up being interpreted as
  // a query of whether [Base, V] is dereferenceable and V is aligned (since
  // that's what the implementation happened to do). It's unclear if this is
  // the desired semantic, but at least SelectionDAG does exercise this case.

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  // Require ABI alignment for loads without an alignment specification.
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  if (!Ty->isSized())
    return false;

  APInt AccessSize(DL.getIndexTypeSizeInBits(V->getType()),
                   DL.getTypeStoreSize(Ty));
  return isDereferenceableAndAlignedPointer(V, Align, AccessSize,
                                            DL, CtxI, DT);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
///
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// This uses the pointee type to determine how many bytes need to be safe to
/// load from the pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target.
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  // Without a scan origin there is nothing further we can prove.
  if (!ScanFrom)
    return false;

  if (Size.getBitWidth() > 64)
    return false;
  const uint64_t LoadSize = Size.getZExtValue();

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
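  //
  // For example, when asked about the second load in
  //   %x = load i32, i32* %p
  //   %y = load i32, i32* %p
  // the scan finds the first load of %p: if %p were not loadable, that
  // earlier access would already have trapped.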
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  // Walk backwards from ScanFrom to the start of the block.
  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might do
    // a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }

  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}
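
// Scan backwards from ScanFrom for a load of, or a store to, an address
// equivalent to Ptr. For example, given
//   store i32 42, i32* %p
//   %v = load i32, i32* %p
// a scan for %p starting at the load finds the store and returns the stored
// value (i32 42), allowing the caller to forward it to the load.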
Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to expected value in case next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++*NumScanedInst;

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
      if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
          (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
          StrippedPtr != StorePtr)
        continue;

      // If we have alias analysis and it says the store won't modify the
      // loaded value, ignore the store.
      if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
        continue;

      // Otherwise the store may or may not alias the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer; bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}