//===- Loads.cpp - Local load analysis ------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines simple local analyses for load instructions.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/Loads.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"

using namespace llvm;

static bool isAligned(const Value *Base, const APInt &Offset, unsigned Align,
                      const DataLayout &DL) {
  APInt BaseAlign(Offset.getBitWidth(), Base->getPointerAlignment(DL));

  if (!BaseAlign) {
    Type *Ty = Base->getType()->getPointerElementType();
    if (!Ty->isSized())
      return false;
    BaseAlign = DL.getABITypeAlignment(Ty);
  }

  APInt Alignment(Offset.getBitWidth(), Align);

  assert(Alignment.isPowerOf2() && "must be a power of 2!");
  return BaseAlign.uge(Alignment) && !(Offset & (Alignment - 1));
}
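
// For illustration: with a base pointer known to be 16-byte aligned and
// Offset == 8, a request for Align == 8 succeeds, since BaseAlign (16) uge
// Alignment (8) and (8 & (8 - 1)) == 0.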

static bool isAligned(const Value *Base, unsigned Align, const DataLayout &DL) {
  Type *Ty = Base->getType();
  assert(Ty->isSized() && "must be sized");
  APInt Offset(DL.getTypeStoreSizeInBits(Ty), 0);
  return isAligned(Base, Offset, Align, DL);
}

/// Test if V is always a pointer to allocated and suitably aligned memory for
/// a simple load or store.
static bool isDereferenceableAndAlignedPointer(
    const Value *V, unsigned Align, const APInt &Size, const DataLayout &DL,
    const Instruction *CtxI, const DominatorTree *DT,
    SmallPtrSetImpl<const Value *> &Visited) {
  // Already visited? Bail out, we've likely hit unreachable code.
  if (!Visited.insert(V).second)
    return false;

  // Note that it is not safe to speculate into a malloc'd region because
  // malloc may return null.

  // bitcast instructions are no-ops as far as dereferenceability is concerned.
  if (const BitCastOperator *BC = dyn_cast<BitCastOperator>(V))
    return isDereferenceableAndAlignedPointer(BC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  bool CheckForNonNull = false;
  APInt KnownDerefBytes(Size.getBitWidth(),
                        V->getPointerDereferenceableBytes(DL, CheckForNonNull));
  if (KnownDerefBytes.getBoolValue()) {
    if (KnownDerefBytes.uge(Size))
      if (!CheckForNonNull || isKnownNonZero(V, DL, 0, nullptr, CtxI, DT))
        return isAligned(V, Align, DL);
  }

  // For GEPs, determine if the indexing lands within the allocated object.
  if (const GEPOperator *GEP = dyn_cast<GEPOperator>(V)) {
    const Value *Base = GEP->getPointerOperand();

    APInt Offset(DL.getIndexTypeSizeInBits(GEP->getType()), 0);
    if (!GEP->accumulateConstantOffset(DL, Offset) || Offset.isNegative() ||
        !Offset.urem(APInt(Offset.getBitWidth(), Align)).isMinValue())
      return false;

    // If the base pointer is dereferenceable for Offset+Size bytes, then the
    // GEP (== Base + Offset) is dereferenceable for Size bytes. If the base
    // pointer is aligned to Align bytes, and the Offset is divisible by Align
    // then the GEP (== Base + Offset == k_0 * Align + k_1 * Align) is also
    // aligned to Align bytes.
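    //
    // For example (illustrative IR), with Align == 4:
    //   %p = getelementptr inbounds i32, i32* %base, i64 2
    // accumulates Offset == 8, which is divisible by 4; if %base is
    // dereferenceable for 8 + Size bytes and 4-byte aligned, the GEP is
    // dereferenceable for Size bytes and 4-byte aligned as well.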
    //
    // Offset and Size may have different bit widths if we have visited an
    // addrspacecast, so we can't do arithmetic directly on the APInt values.
    return isDereferenceableAndAlignedPointer(
        Base, Align, Offset + Size.sextOrTrunc(Offset.getBitWidth()),
        DL, CtxI, DT, Visited);
  }

  // For gc.relocate, look through relocations.
  if (const GCRelocateInst *RelocateInst = dyn_cast<GCRelocateInst>(V))
    return isDereferenceableAndAlignedPointer(
        RelocateInst->getDerivedPtr(), Align, Size, DL, CtxI, DT, Visited);

  if (const AddrSpaceCastInst *ASC = dyn_cast<AddrSpaceCastInst>(V))
    return isDereferenceableAndAlignedPointer(ASC->getOperand(0), Align, Size,
                                              DL, CtxI, DT, Visited);

  if (const auto *Call = dyn_cast<CallBase>(V))
    if (auto *RP = getArgumentAliasingToReturnedPointer(Call))
      return isDereferenceableAndAlignedPointer(RP, Align, Size, DL, CtxI, DT,
                                                Visited);

  // If we don't know, assume the worst.
  return false;
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, unsigned Align,
                                              const APInt &Size,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT,
                                              Visited);
}

bool llvm::isDereferenceableAndAlignedPointer(const Value *V, Type *Ty,
                                              unsigned Align,
                                              const DataLayout &DL,
                                              const Instruction *CtxI,
                                              const DominatorTree *DT) {
  // When dereferenceability information is provided by a dereferenceable
  // attribute, we know exactly how many bytes are dereferenceable. If we can
  // determine the exact offset to the attributed variable, we can use that
  // information here.

  if (!Ty->isSized())
    return false;

  // Require ABI alignment for loads without an alignment specification.
  if (Align == 0)
    Align = DL.getABITypeAlignment(Ty);

  SmallPtrSet<const Value *, 32> Visited;
  return ::isDereferenceableAndAlignedPointer(
      V, Align,
      APInt(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty)),
      DL, CtxI, DT, Visited);
}

bool llvm::isDereferenceablePointer(const Value *V, Type *Ty,
                                    const DataLayout &DL,
                                    const Instruction *CtxI,
                                    const DominatorTree *DT) {
  return isDereferenceableAndAlignedPointer(V, Ty, 1, DL, CtxI, DT);
}
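
// Illustrative use of the API above (Ptr, Ty, DL, I and DT are hypothetical
// caller-supplied values, e.g. inside an optimization pass):
//   if (isDereferenceablePointer(Ptr, Ty, DL, &I, &DT))
//     ...; // speculating a load of Ptr at I cannot introduce a trap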

/// Test if A and B will obviously have the same value.
///
/// This includes recognizing that %t0 and %t1 will have the same
/// value in code like this:
/// \code
///   %t0 = getelementptr \@a, 0, 3
///   store i32 0, i32* %t0
///   %t1 = getelementptr \@a, 0, 3
///   %t2 = load i32* %t1
/// \endcode
///
static bool AreEquivalentAddressValues(const Value *A, const Value *B) {
  // Test if the values are trivially equivalent.
  if (A == B)
    return true;

  // Test if the values come from identical arithmetic instructions.
  // Use isIdenticalToWhenDefined instead of isIdenticalTo because
  // this function is only used when one address use dominates the
  // other, which means that they'll always either have the same
  // value or one of them will have an undefined value.
  if (isa<BinaryOperator>(A) || isa<CastInst>(A) || isa<PHINode>(A) ||
      isa<GetElementPtrInst>(A))
    if (const Instruction *BI = dyn_cast<Instruction>(B))
      if (cast<Instruction>(A)->isIdenticalToWhenDefined(BI))
        return true;

  // Otherwise they may not be equivalent.
  return false;
}

/// Check if executing a load of this pointer value cannot trap.
///
/// If DT and ScanFrom are specified this method performs context-sensitive
/// analysis and returns true if it is safe to load immediately before ScanFrom.
///
/// If it is not obviously safe to load from the specified pointer, we do
/// a quick local scan of the basic block containing \c ScanFrom, to determine
/// if the address is already accessed.
///
/// \c Size determines how many bytes need to be safe to load from the
/// pointer.
bool llvm::isSafeToLoadUnconditionally(Value *V, unsigned Align, APInt &Size,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  // Zero alignment means that the load has the ABI alignment for the target.
  if (Align == 0)
    Align = DL.getABITypeAlignment(V->getType()->getPointerElementType());
  assert(isPowerOf2_32(Align));

  // If DT is not specified we can't make a context-sensitive query.
  const Instruction *CtxI = DT ? ScanFrom : nullptr;
  if (isDereferenceableAndAlignedPointer(V, Align, Size, DL, CtxI, DT))
    return true;

  int64_t ByteOffset = 0;
  Value *Base = GetPointerBaseWithConstantOffset(V, ByteOffset, DL);

  if (ByteOffset < 0) // out of bounds
    return false;

  Type *BaseType = nullptr;
  unsigned BaseAlign = 0;
  if (const AllocaInst *AI = dyn_cast<AllocaInst>(Base)) {
    // An alloca is safe to load from as long as it is suitably aligned.
    BaseType = AI->getAllocatedType();
    BaseAlign = AI->getAlignment();
  } else if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(Base)) {
    // Global variables are not necessarily safe to load from if they are
    // interposed arbitrarily. Their size may change or they may be weak and
    // require a test to determine if they were in fact provided.
    if (!GV->isInterposable()) {
      BaseType = GV->getType()->getElementType();
      BaseAlign = GV->getAlignment();
    }
  }

  PointerType *AddrTy = cast<PointerType>(V->getType());
  uint64_t LoadSize = DL.getTypeStoreSize(AddrTy->getElementType());

  // If we found a base allocated type from either an alloca or global
  // variable, try to see if we are definitively within the allocated region.
  // We need to know the size of the base type and the loaded type to do
  // anything in this case.
  if (BaseType && BaseType->isSized()) {
    if (BaseAlign == 0)
      BaseAlign = DL.getPrefTypeAlignment(BaseType);

    if (Align <= BaseAlign) {
      // Check if the load is within the bounds of the underlying object.
      if (ByteOffset + LoadSize <= DL.getTypeAllocSize(BaseType) &&
          ((ByteOffset % Align) == 0))
        return true;
    }
  }

  if (!ScanFrom)
    return false;

  // Otherwise, be a little bit aggressive by scanning the local block where we
  // want to check to see if the pointer is already being loaded or stored
  // from/to. If so, the previous load or store would have already trapped,
  // so there is no harm doing an extra load (also, CSE will later eliminate
  // the load entirely).
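  //
  // For example (illustrative IR), a load inserted at ScanFrom below is safe
  // because %p was already dereferenced earlier in the same block:
  //   %v0 = load i32, i32* %p
  //   ...                        ; no intervening free or clobbering call
  //   <ScanFrom>                 ; loading i32 from %p here cannot trap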
  BasicBlock::iterator BBI = ScanFrom->getIterator(),
                       E = ScanFrom->getParent()->begin();

  // We can at least always strip pointer casts even though we can't use the
  // base here.
  V = V->stripPointerCasts();

  while (BBI != E) {
    --BBI;

    // If we see a free or a call which may write to memory (i.e. which might
    // do a free) the pointer could be marked invalid.
    if (isa<CallInst>(BBI) && BBI->mayWriteToMemory() &&
        !isa<DbgInfoIntrinsic>(BBI))
      return false;

    Value *AccessedPtr;
    unsigned AccessedAlign;
    if (LoadInst *LI = dyn_cast<LoadInst>(BBI)) {
      // Ignore volatile loads. The execution of a volatile load cannot
      // be used to prove an address is backed by regular memory; it can,
      // for example, point to an MMIO register.
      if (LI->isVolatile())
        continue;
      AccessedPtr = LI->getPointerOperand();
      AccessedAlign = LI->getAlignment();
    } else if (StoreInst *SI = dyn_cast<StoreInst>(BBI)) {
      // Ignore volatile stores (see comment for loads).
      if (SI->isVolatile())
        continue;
      AccessedPtr = SI->getPointerOperand();
      AccessedAlign = SI->getAlignment();
    } else
      continue;

    Type *AccessedTy = AccessedPtr->getType()->getPointerElementType();
    if (AccessedAlign == 0)
      AccessedAlign = DL.getABITypeAlignment(AccessedTy);
    if (AccessedAlign < Align)
      continue;

    // Handle trivial cases.
    if (AccessedPtr == V)
      return true;

    if (AreEquivalentAddressValues(AccessedPtr->stripPointerCasts(), V) &&
        LoadSize <= DL.getTypeStoreSize(AccessedTy))
      return true;
  }
  return false;
}

bool llvm::isSafeToLoadUnconditionally(Value *V, Type *Ty, unsigned Align,
                                       const DataLayout &DL,
                                       Instruction *ScanFrom,
                                       const DominatorTree *DT) {
  APInt Size(DL.getIndexTypeSizeInBits(V->getType()), DL.getTypeStoreSize(Ty));
  return isSafeToLoadUnconditionally(V, Align, Size, DL, ScanFrom, DT);
}

/// DefMaxInstsToScan - the default number of maximum instructions
/// to scan in the block, used by FindAvailableLoadedValue().
/// FindAvailableLoadedValue() was introduced in r60148, to improve jump
/// threading in part by eliminating partially redundant loads.
/// At that point, the value of MaxInstsToScan was already set to '6'
/// without documented explanation.
cl::opt<unsigned>
llvm::DefMaxInstsToScan("available-load-scan-limit", cl::init(6), cl::Hidden,
  cl::desc("Use this to specify the default maximum number of instructions "
           "to scan backward from a given instruction, when searching for "
           "available loaded value"));

Value *llvm::FindAvailableLoadedValue(LoadInst *Load,
                                      BasicBlock *ScanBB,
                                      BasicBlock::iterator &ScanFrom,
                                      unsigned MaxInstsToScan,
                                      AliasAnalysis *AA, bool *IsLoad,
                                      unsigned *NumScanedInst) {
  // Don't CSE a load that is volatile or anything stronger than unordered.
  if (!Load->isUnordered())
    return nullptr;

  return FindAvailablePtrLoadStore(
      Load->getPointerOperand(), Load->getType(), Load->isAtomic(), ScanBB,
      ScanFrom, MaxInstsToScan, AA, IsLoad, NumScanedInst);
}
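
// Sketch of a typical call (names are illustrative; callers such as jump
// threading scan backwards from the load itself):
//   BasicBlock::iterator It(Load);
//   if (Value *Avail = FindAvailableLoadedValue(Load, Load->getParent(), It,
//                                               DefMaxInstsToScan, AA))
//     ...; // Avail can replace Load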

Value *llvm::FindAvailablePtrLoadStore(Value *Ptr, Type *AccessTy,
                                       bool AtLeastAtomic, BasicBlock *ScanBB,
                                       BasicBlock::iterator &ScanFrom,
                                       unsigned MaxInstsToScan,
                                       AliasAnalysis *AA, bool *IsLoadCSE,
                                       unsigned *NumScanedInst) {
  if (MaxInstsToScan == 0)
    MaxInstsToScan = ~0U;

  const DataLayout &DL = ScanBB->getModule()->getDataLayout();

  // Try to get the store size for the type.
  auto AccessSize = LocationSize::precise(DL.getTypeStoreSize(AccessTy));

  Value *StrippedPtr = Ptr->stripPointerCasts();

  while (ScanFrom != ScanBB->begin()) {
    // We must ignore debug info directives when counting (otherwise they
    // would affect codegen).
    Instruction *Inst = &*--ScanFrom;
    if (isa<DbgInfoIntrinsic>(Inst))
      continue;

    // Restore ScanFrom to the expected value in case the next test succeeds.
    ScanFrom++;

    if (NumScanedInst)
      ++(*NumScanedInst);

    // Don't scan huge blocks.
    if (MaxInstsToScan-- == 0)
      return nullptr;

    --ScanFrom;
    // If this is a load of Ptr, the loaded value is available.
    // (This is true even if the load is volatile or atomic, although
    // those cases are unlikely.)
    if (LoadInst *LI = dyn_cast<LoadInst>(Inst))
      if (AreEquivalentAddressValues(
              LI->getPointerOperand()->stripPointerCasts(), StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(LI->getType(), AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (LI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = true;
        return LI;
      }

    if (StoreInst *SI = dyn_cast<StoreInst>(Inst)) {
      Value *StorePtr = SI->getPointerOperand()->stripPointerCasts();
      // If this is a store through Ptr, the value is available!
      // (This is true even if the store is volatile or atomic, although
      // those cases are unlikely.)
      if (AreEquivalentAddressValues(StorePtr, StrippedPtr) &&
          CastInst::isBitOrNoopPointerCastable(SI->getValueOperand()->getType(),
                                               AccessTy, DL)) {

        // We can value forward from an atomic to a non-atomic, but not the
        // other way around.
        if (SI->isAtomic() < AtLeastAtomic)
          return nullptr;

        if (IsLoadCSE)
          *IsLoadCSE = false;
        return SI->getOperand(0);
      }

      // If both StrippedPtr and StorePtr reach all the way to an alloca or
      // global and they are different, ignore the store. This is a trivial form
      // of alias analysis that is important for reg2mem'd code.
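      //
      // For example (illustrative IR), a store to one alloca cannot clobber
      // a value loaded from a different alloca:
      //   %a = alloca i32
      //   %b = alloca i32
      //   store i32 1, i32* %a
      //   %v = load i32, i32* %b   ; the store to %a is irrelevant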
426 if ((isa<AllocaInst>(StrippedPtr) || isa<GlobalVariable>(StrippedPtr)) &&
427 (isa<AllocaInst>(StorePtr) || isa<GlobalVariable>(StorePtr)) &&
428 StrippedPtr != StorePtr)
429 continue;
431 // If we have alias analysis and it says the store won't modify the loaded
432 // value, ignore the store.
433 if (AA && !isModSet(AA->getModRefInfo(SI, StrippedPtr, AccessSize)))
434 continue;
436 // Otherwise the store that may or may not alias the pointer, bail out.
437 ++ScanFrom;
438 return nullptr;

    // If this is some other instruction that may clobber Ptr, bail out.
    if (Inst->mayWriteToMemory()) {
      // If alias analysis claims that it really won't modify the load,
      // ignore it.
      if (AA && !isModSet(AA->getModRefInfo(Inst, StrippedPtr, AccessSize)))
        continue;

      // May modify the pointer, bail out.
      ++ScanFrom;
      return nullptr;
    }
  }

  // Got to the start of the block, we didn't find it, but are done for this
  // block.
  return nullptr;
}