//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This will allow us to more-aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C) which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(); both functions need to use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //     char *p = (char*)malloc(100)
  //     char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the middle
  // of the "object". In case q is passed to isObjectSmallerThan() as the 1st
  // parameter, before llvm::getObjectSize() is called to get the size of the
  // entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
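  // (Illustrative example, not from the original source: a 5-byte object with
  // 8-byte alignment is treated as 8 bytes here, since a sufficiently aligned
  // read may legally touch up to the full 8 bytes.)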
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// returns whether we looked through any sign or zero extends. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
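///
/// As an illustrative sketch (the value names are hypothetical), given
///   %t = mul nsw i64 %x, 4
///   %v = add nsw i64 %t, 8
/// analyzing %v yields V == %x with Scale == 4 and Offset == 8, i.e.
/// %v == 4*%x + 8.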
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
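        // (For example, if the low two bits of X are known to be zero, then
        // X | 3 == X + 3, because the OR cannot carry into the higher bits.)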
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }

      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This is
/// an issue, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum pointer size is 64b.
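///
/// For example (values are illustrative): with a 64-bit maximum pointer size
/// and PointerSize == 32, an Offset of 0xFFFFFFFF is reinterpreted as -1
/// rather than 4294967295, since (0xFFFFFFFF << 32).ashr(32) == -1.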
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
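///
/// As an illustrative sketch (value names are hypothetical), decomposing
///   %p = getelementptr i32, i32* %base, i64 %i
/// yields Base == %base with a single variable index {V = %i, Scale = 4},
/// since each i32 element occupies 4 bytes.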
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
       DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
       DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed with
        // the attributes, but have properties like returning aliasing pointer.
        // Because some analysis may assume that a nocaptured pointer is not
        // returned from some special intrinsic (because the function would have
        // to be marked with the returns attribute), it is crucial to use this
        // function because it should be in sync with CaptureTracking. Not using
        // it may cause weird miscompilations where 2 aliasing pointers are
        // assumed to be noalias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
          DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
          (DL.getTypeAllocSize(GTI.getIndexedType()) *
           CIdx->getValue().sextOrSelf(MaxPointerSize))
              .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize, DL.getTypeAllocSize(GTI.getIndexedType()));
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
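      // (For example, an i32 index with 64-bit pointers behaves as if it were
      // first sign extended to i64, so 32 extra SExtBits are recorded here.)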
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.

      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                               Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
        adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
        adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AliasCache.find(LocPair(LocA, LocB));
  if (CacheIt != AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags);
  // AliasCache rarely has more than 1 or 2 elements, always use
  // shrink_and_clear so it quickly returns to the inline capacity of the
  // SmallDenseMap if it ever grows larger.
  // FIXME: This should really be shrink_to_inline_capacity_and_clear().
  AliasCache.shrink_and_clear();
  IsCapturedCache.shrink_and_clear();
  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          getBestAAResults().alias(MemoryLocation(*CI), MemoryLocation(Object));
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Call), Loc) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  //   *ptr = 40;
  //   *ptr = 50;
  //   invariant_start(ptr)
  //   int val = *ptr;
  //   print(val);
  //
  // This cannot be transformed to:
  //
  //   *ptr = 40;
  //   invariant_start(ptr)
  //   *ptr = 50;
  //   int val = *ptr;
  //   print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  //
  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
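///
/// An illustrative sketch (types and value names are hypothetical):
///   %a = getelementptr %struct.S, %struct.S* %base, i64 %i, i32 0
///   %b = getelementptr %struct.S, %struct.S* %base, i64 %j, i32 1
/// Even when %i and %j are unknown, the trailing constant indices select
/// distinct fields of %struct.S, so suitably sized accesses through %a and
/// %b cannot alias.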
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
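    // (For example, in [2 x [4 x i32]], &A[0][4] and &A[1][0] denote the same
    // address, so differing intermediate indices could still overlap.)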
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
    //
    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;

    Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
    Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
    if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
      // If one of the indices is a PHI node, be safe and only use
      // computeKnownBits so we don't make any assumptions about the
      // relationships between the two indices. This is important if we're
      // asking about values from different loop iterations. See PR32314.
      // TODO: We may be able to change the check so we only do this when
      // we definitely looked through a PHINode.
      if (GEP1LastIdx != GEP2LastIdx &&
          GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
        KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
        KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
        if (Known1.Zero.intersects(Known2.One) ||
            Known1.One.intersects(Known2.Zero))
          return NoAlias;
      }
    } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
      return NoAlias;

    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points to would have a negative offset with
// respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
//   struct { int f0, int f1, ...} foo;
//   foo alloca;
//   foo* random = bar(alloca);
//   int *f0 = &alloca.f0
//   int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
//   %alloca = alloca %struct.foo
//   %random = call %struct.foo* @random(%struct.foo* %alloca)
//   %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
//   %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a globalvariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset
  // from the base, then use it. If the GEP has variable indices,
  // we can't get exact GEP offset to identify pointer alias. So return
  // false in that case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
AliasResult
BasicAAResult::aliasGEP(const GEPOperator *GEP1, LocationSize V1Size,
                        const AAMDNodes &V1AAInfo, const Value *V2,
                        LocationSize V2Size, const AAMDNodes &V2AAInfo,
                        const Value *UnderlyingV1, const Value *UnderlyingV2) {
  DecomposedGEP DecompGEP1, DecompGEP2;
  unsigned MaxPointerSize = getMaxPointerSize(DL);
  DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
  DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);

  bool GEP1MaxLookupReached =
      DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
  bool GEP2MaxLookupReached =
      DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);

  APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
  APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;

  assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
         "DecomposeGEPExpression returned a result different from "
         "GetUnderlyingObject");

  // If the GEP's offset relative to its base is such that the base would
  // fall below the start of the object underlying V2, then the GEP and V2
  // cannot alias.
  if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
      isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
    return NoAlias;
  // If we have two gep instructions with must-alias or not-alias'ing base
  // pointers, figure out if the indexes to the GEP tell us anything about the
  // derived pointer.
  if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
    // Check for the GEP base being at a negative offset, this time in the other
    // direction.
    if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
        isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
      return NoAlias;
    // Do the base pointers alias?
    AliasResult BaseAlias =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
                   UnderlyingV2, LocationSize::unknown(), AAMDNodes());

    // Check for geps of non-aliasing underlying pointers where the offsets are
    // identical.
    if ((BaseAlias == MayAlias) && V1Size == V2Size) {
      // Do the base pointers alias assuming type and size.
      AliasResult PreciseBaseAlias = aliasCheck(UnderlyingV1, V1Size, V1AAInfo,
                                                UnderlyingV2, V2Size, V2AAInfo);
      if (PreciseBaseAlias == NoAlias) {
        // See if the computed offset from the common pointer tells us about the
        // relation of the resulting pointer.
        // If the max search depth is reached the result is undefined
        if (GEP2MaxLookupReached || GEP1MaxLookupReached)
          return MayAlias;

        // Same offsets.
        if (GEP1BaseOffset == GEP2BaseOffset &&
            DecompGEP1.VarIndices == DecompGEP2.VarIndices)
          return NoAlias;
      }
    }

    // If we get a No or May, then return it immediately, no amount of analysis
    // will improve this situation.
    if (BaseAlias != MustAlias) {
      assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
      return BaseAlias;
    }

    // Otherwise, we have a MustAlias. Since the base pointers alias each other
    // exactly, see if the computed offset from the common pointer tells us
    // about the relation of the resulting pointer.
    // If we know the two GEPs are based off of the exact same pointer (and not
    // just the same underlying object), see if that tells us anything about
    // the resulting pointers.
    if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
            GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
        GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
      AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
      // If we couldn't find anything interesting, don't abandon just yet.
      if (R != MayAlias)
        return R;
    }

    // If the max search depth is reached, the result is undefined
    if (GEP2MaxLookupReached || GEP1MaxLookupReached)
      return MayAlias;

    // Subtract the GEP2 pointer from the GEP1 pointer to find out their
    // symbolic difference.
    GEP1BaseOffset -= GEP2BaseOffset;
    GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);

  } else {
    // Check to see if these two pointers are related by the getelementptr
    // instruction. If one pointer is a GEP with a non-zero index of the other
    // pointer, we know they cannot alias.
    //
    // If both accesses are unknown size, we can't do anything useful here.
    if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
      return MayAlias;

    AliasResult R =
        aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(), V2,
                   LocationSize::unknown(), V2AAInfo, nullptr, UnderlyingV2);
    if (R != MustAlias) {
      // If V2 may alias GEP base pointer, conservatively returns MayAlias.
      // If V2 is known not to alias GEP base pointer, then the two values
      // cannot alias per GEP semantics: "Any memory access must be done through
      // a pointer value associated with an address range of the memory access,
      // otherwise the behavior is undefined.".
      assert(R == NoAlias || R == MayAlias);
      return R;
    }

    // If the max search depth is reached the result is undefined
    if (GEP1MaxLookupReached)
      return MayAlias;
  }


  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resultant pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
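  //
  // Worked sketch (hypothetical values): if GEP1 = %p + 4 and V2 = %p with
  // V2Size = 8 bytes, the accesses overlap in bytes [4, 8) -> PartialAlias;
  // with V2Size = 4, the access at %p ends exactly where GEP1 begins
  // -> NoAlias.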
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset.sge(0)) {
      if (V2Size != LocationSize::unknown()) {
        if (GEP1BaseOffset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }

  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        APInt Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
      }
    }

    Modulo = Modulo ^ (Modulo & (Modulo - 1));
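    // The line above keeps only the lowest set bit of Modulo. For example
    // (hypothetical value), with Modulo = 0b0110: Modulo - 1 = 0b0101, the
    // AND gives 0b0100, and XOR with the original leaves 0b0010. Every
    // variable term is then a multiple of this power of two.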

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
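    // Numeric sketch (hypothetical values): with GEP1BaseOffset = 16 and
    // V2Size = 8, GEP1 >= V2 + 16 whenever all variable terms are known
    // non-negative, so an 8-byte access at V2 ends before GEP1 begins.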
    if (AllPositive && GEP1BaseOffset.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffset.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult BasicAAResult::aliasSelect(const SelectInst *SI,
                                       LocationSize SISize,
                                       const AAMDNodes &SIAAInfo,
                                       const Value *V2, LocationSize V2Size,
                                       const AAMDNodes &V2AAInfo,
                                       const Value *UnderV2) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
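  //
  // Illustration (hypothetical IR):
  //   %s1 = select i1 %c, i32* %a, i32* %b
  //   %s2 = select i1 %c, i32* %x, i32* %y
  // Only the pairs (%a, %x) and (%b, %y) can be live together, so checking
  // those two arm-wise queries suffices.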
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias = aliasCheck(SI->getTrueValue(), SISize, SIAAInfo,
                                     SI2->getTrueValue(), V2Size, V2AAInfo);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select NoAlias or MustAlias V2, the result is
  // NoAlias or MustAlias respectively. Otherwise, the result is MayAlias.
  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                 SISize, SIAAInfo, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias =
      aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(), SISize, SIAAInfo,
                 UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provides a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on corresponding edges.
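  //
  // Illustration (hypothetical IR), with both PHIs in the same block:
  //   %p = phi i32* [ %a, %bb1 ], [ %b, %bb2 ]
  //   %q = phi i32* [ %x, %bb1 ], [ %y, %bb2 ]
  // On any execution the pair is either (%a, %x) or (%b, %y), so checking
  // the corresponding edges is sufficient.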
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                   MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      assert(AliasCache.count(Locs) &&
             "There must exist an entry for the phi node");
      AliasResult OrigAliasResult = AliasCache[Locs];
      AliasCache[Locs] = NoAlias;

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias)
        AliasCache[Locs] = OrigAliasResult;

      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid compile time explosion. The worst possible case
    // is when both sides are PHI nodes, in which case this is O(m x n) time,
    // where 'm' and 'n' are the numbers of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this case
          // specially below.
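          // Illustration of such a loop (hypothetical IR):
          //   %p = phi i32* [ %base, %entry ], [ %p.next, %loop ]
          //   %p.next = getelementptr i32, i32* %p, i64 1
          // Recursing through %p.next would reach %p again and always yield
          // MayAlias.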
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any source is itself a PHI, return MayAlias conservatively to
        // avoid compile time explosion. The worst possible case is when both
        // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
        // and 'n' are the numbers of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // As above: check whether the incoming value is a GEP that advances
          // the pointer result of this PHI node (e.g. in a loop). If so, we
          // would recurse and always get a MayAlias; handle this case
          // specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();

  AliasResult Alias =
      aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0],
                 PNSize, PNAAInfo, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, the result is
  // NoAlias or MustAlias respectively. Otherwise, the result is MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      const Value *O1, const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-constant identified objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
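    //
    // Sketch (hypothetical IR): a non-escaping local such as
    //   %buf = alloca [16 x i8]
    // whose address is never captured cannot be the object behind a pointer
    // returned by a call or loaded from memory in the same function.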
    if (isEscapeSource(O1) && isNonEscapingLocalObject(O2, &IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) && isNonEscapingLocalObject(O1, &IsCapturedCache))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
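  //
  // Sketch (hypothetical sizes): an 8-byte access cannot legally target a
  // 4-byte object, so if the object on the other side is provably smaller
  // than the access, the two locations cannot alias in a defined execution.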
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)) ||
      (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
               MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AliasCacheTy::iterator, bool> Pair =
      AliasCache.insert(std::make_pair(Locs, MayAlias));
  if (!Pair.second)
    return Pair.first->second;

  // FIXME: This isn't aggressively handling alias(GEP, PHI) for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V1AAInfo,
                                  V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2);
    if (Result != MayAlias)
      return AliasCache[Locs] = Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasCache[Locs] = PartialAlias;

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second);
  return AliasCache[Locs] = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V1 and \p V2 this checks whether
/// they can not be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
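///
/// Illustrative scenario (hypothetical IR): for a loop-carried pointer
///   %p = phi i32* [ %a, %entry ], [ %p.inc, %loop ]
/// an instruction %v inside the loop compared against itself may denote
/// values from different iterations, so pointer equality alone is not enough.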
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
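///
/// Worked sketch (hypothetical indices): with Dest = { 4*%x, 2*%y } and
/// Src = { 4*%x }, the %x terms cancel and Dest becomes { 2*%y }; a Src-only
/// term such as 8*%z is appended to Dest with its scale negated (-8*%z).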
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to
  // wrapping arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and
  // for other values GEP1 > V2). We'll therefore only declare NoAlias if both
  // V1Size and V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", false, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}