//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/PhiValues.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include <cassert>
#include <cstdint>
#include <cstdlib>
#include <utility>

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basicaa-recphi", cl::Hidden,
                                          cl::init(false));

/// By default, even on 32-bit architectures we use 64-bit integers for
/// calculations. This allows us to more aggressively decompose indexing
/// expressions calculated using i64 values (e.g., long long in C), which is
/// common enough to worry about.
static cl::opt<bool> ForceAtLeast64Bits("basicaa-force-at-least-64b",
                                        cl::Hidden, cl::init(true));
static cl::opt<bool> DoubleCalcBits("basicaa-double-calc-bits",
                                    cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes show how often the limit to decompose
/// GEPs is reached. This affects the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

/// Cutoff after which to stop analysing a set of phi nodes potentially involved
/// in a cycle. Because we are analysing 'through' phi nodes, we need to be
/// careful with value equivalence. We use reachability to make sure a value
/// cannot be involved in a cycle.
const unsigned MaxNumPhiBBsValueReachabilityCheck = 20;

// The max limit of the search depth in DecomposeGEPExpression() and
// GetUnderlyingObject(). Both functions need to use the same search depth,
// otherwise the algorithm in aliasGEP will assert.
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)) ||
      (LI && Inv.invalidate<LoopAnalysis>(Fn, PA)) ||
      (PV && Inv.invalidate<PhiValuesAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns true if the pointer is to a function-local object that never
/// escapes from the function.
static bool isNonEscapingLocalObject(
    const Value *V,
    SmallDenseMap<const Value *, bool, 8> *IsCapturedCache = nullptr) {
  SmallDenseMap<const Value *, bool, 8>::iterator CacheIt;
  if (IsCapturedCache) {
    bool Inserted;
    std::tie(CacheIt, Inserted) = IsCapturedCache->insert({V, false});
    if (!Inserted)
      // Found cached result, return it!
      return CacheIt->second;
  }

  // If this is a local allocation, check to see if it escapes.
  if (isa<AllocaInst>(V) || isNoAliasCall(V)) {
    // Set StoreCaptures to True so that we can assume in our callers that the
    // pointer is not the result of a load instruction. Currently
    // PointerMayBeCaptured doesn't have any special analysis for the
    // StoreCaptures=false case; if it did, our callers could be refined to be
    // more precise.
    auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
    if (IsCapturedCache)
      CacheIt->second = Ret;
    return Ret;
  }

  // If this is an argument that corresponds to a byval or noalias argument,
  // then it has not escaped before entering the function. Check if it escapes
  // inside the function.
  if (const Argument *A = dyn_cast<Argument>(V))
    if (A->hasByValAttr() || A->hasNoAliasAttr()) {
      // Note even if the argument is marked nocapture, we still need to check
      // for copies made inside the function. The nocapture attribute only
      // specifies that there are no copies made that outlive the function.
      auto Ret = !PointerMayBeCaptured(V, false, /*StoreCaptures=*/true);
      if (IsCapturedCache)
        CacheIt->second = Ret;
      return Ret;
    }

  return false;
}

/// Returns true if the pointer is one which would have been considered an
/// escape by isNonEscapingLocalObject.
static bool isEscapeSource(const Value *V) {
  if (isa<CallBase>(V))
    return true;

  if (isa<Argument>(V))
    return true;

  // The load case works because isNonEscapingLocalObject considers all
  // stores to be escapes (it passes true for the StoreCaptures argument
  // to PointerMayBeCaptured).
  if (isa<LoadInst>(V))
    return true;

  return false;
}

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100)
  //   char *q = p+80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base-address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
///
/// Returns the scale and offset values as APInts, returns V as a Value*, and
/// reports whether any sign or zero extends were looked through. The incoming
/// Value is known to have IntegerType, and it may already be sign or zero
/// extended.
///
/// Note that this looks through extends, so the high bits may not be
/// represented in the result.
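///
/// For example, given
///   %a = mul i32 %x, 4
///   %b = add i32 %a, 5
/// decomposing %b yields %x with Scale = 4 and Offset = 5.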
/*static*/ const Value *BasicAAResult::GetLinearExpression(
    const Value *V, APInt &Scale, APInt &Offset, unsigned &ZExtBits,
    unsigned &SExtBits, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT, bool &NSW, bool &NUW) {
  assert(V->getType()->isIntegerTy() && "Not an integer value");

  // Limit our recursion depth.
  if (Depth == 6) {
    Scale = 1;
    Offset = 0;
    return V;
  }

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(V)) {
    // If it's a constant, just convert it to an offset and remove the variable.
    // If we've been called recursively, the Offset bit width will be greater
    // than the constant's (the Offset's always as wide as the outermost call),
    // so we'll zext here and process any extension in the isa<SExtInst> &
    // isa<ZExtInst> cases below.
    Offset += Const->getValue().zextOrSelf(Offset.getBitWidth());
    assert(Scale == 0 && "Constant values don't have a scale");
    return V;
  }

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      // If we've been called recursively, then Offset and Scale will be wider
      // than the BOp operands. We'll always zext it here as we'll process sign
      // extensions below (see the isa<SExtInst> / isa<ZExtInst> cases).
      APInt RHS = RHSC->getValue().zextOrSelf(Offset.getBitWidth());

      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        Scale = 1;
        Offset = 0;
        return V;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT)) {
          Scale = 1;
          Offset = 0;
          return V;
        }
        LLVM_FALLTHROUGH;
      case Instruction::Add:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset += RHS;
        break;
      case Instruction::Sub:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset -= RHS;
        break;
      case Instruction::Mul:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);
        Offset *= RHS;
        Scale *= RHS;
        break;
      case Instruction::Shl:
        V = GetLinearExpression(BOp->getOperand(0), Scale, Offset, ZExtBits,
                                SExtBits, DL, Depth + 1, AC, DT, NSW, NUW);

        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (Offset.getBitWidth() < RHS.getLimitedValue() ||
            Scale.getBitWidth() < RHS.getLimitedValue()) {
          Scale = 1;
          Offset = 0;
          return V;
        }

        Offset <<= RHS.getLimitedValue();
        Scale <<= RHS.getLimitedValue();
        // The semantics of nsw and nuw for left shifts don't match those of
        // multiplications, so we won't propagate them.
        NSW = NUW = false;
        return V;
      }

      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      return V;
    }
  }

  // Since GEP indices are sign extended anyway, we don't care about the high
  // bits of a sign or zero extended value - just scales and offsets. The
  // extensions have to be consistent though.
  if (isa<SExtInst>(V) || isa<ZExtInst>(V)) {
    Value *CastOp = cast<CastInst>(V)->getOperand(0);
    unsigned NewWidth = V->getType()->getPrimitiveSizeInBits();
    unsigned SmallWidth = CastOp->getType()->getPrimitiveSizeInBits();
    unsigned OldZExtBits = ZExtBits, OldSExtBits = SExtBits;
    const Value *Result =
        GetLinearExpression(CastOp, Scale, Offset, ZExtBits, SExtBits, DL,
                            Depth + 1, AC, DT, NSW, NUW);

    // zext(zext(%x)) == zext(%x), and similarly for sext; we'll handle this
    // by just incrementing the number of bits we've extended by.
    unsigned ExtendedBy = NewWidth - SmallWidth;

    if (isa<SExtInst>(V) && ZExtBits == 0) {
      // sext(sext(%x, a), b) == sext(%x, a + b)

      if (NSW) {
        // We haven't sign-wrapped, so it's valid to decompose sext(%x + c)
        // into sext(%x) + sext(c). We'll sext the Offset ourselves:
        unsigned OldWidth = Offset.getBitWidth();
        Offset = Offset.trunc(SmallWidth).sext(NewWidth).zextOrSelf(OldWidth);
      } else {
        // We may have signed-wrapped, so don't decompose sext(%x + c) into
        // sext(%x) + sext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      SExtBits += ExtendedBy;
    } else {
      // sext(zext(%x, a), b) = zext(zext(%x, a), b) = zext(%x, a + b)

      if (!NUW) {
        // We may have unsigned-wrapped, so don't decompose zext(%x + c) into
        // zext(%x) + zext(c)
        Scale = 1;
        Offset = 0;
        Result = CastOp;
        ZExtBits = OldZExtBits;
        SExtBits = OldSExtBits;
      }
      ZExtBits += ExtendedBy;
    }

    return Result;
  }

  Scale = 1;
  Offset = 0;
  return V;
}

/// Ensures that a pointer offset fits in an integer of size PointerSize
/// (in bits) when that size is smaller than the maximum pointer size. This
/// matters, in particular, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information, where the
/// maximum pointer size is 64b.
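///
/// For example, with a 64-bit Offset and PointerSize == 32, an Offset of
/// 0x00000000FFFFFFFF is sign-extended from bit 31 and becomes -1.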
static APInt adjustToPointerSize(APInt Offset, unsigned PointerSize) {
  assert(PointerSize <= Offset.getBitWidth() && "Invalid PointerSize!");
  unsigned ShiftBits = Offset.getBitWidth() - PointerSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}

static unsigned getMaxPointerSize(const DataLayout &DL) {
  unsigned MaxPointerSize = DL.getMaxPointerSizeInBits();
  if (MaxPointerSize < 64 && ForceAtLeast64Bits) MaxPointerSize = 64;
  if (DoubleCalcBits) MaxPointerSize *= 2;

  return MaxPointerSize;
}

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
///
/// When DataLayout is around, this function is capable of analyzing everything
/// that GetUnderlyingObject can look through. To be able to do that
/// GetUnderlyingObject and DecomposeGEPExpression must use the same search
/// depth (MaxLookupSearchDepth). When DataLayout is not around, it just looks
/// through pointer casts.
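///
/// For example (assuming a 64-bit DataLayout where i32 has alloc size 4),
///   %p = getelementptr [10 x i32], [10 x i32]* %base, i64 0, i64 %i
/// decomposes into Base = %base, StructOffset = OtherOffset = 0, and a single
/// variable index %i with Scale = 4.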
bool BasicAAResult::DecomposeGEPExpression(const Value *V,
    DecomposedGEP &Decomposed, const DataLayout &DL, AssumptionCache *AC,
    DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;

  unsigned MaxPointerSize = getMaxPointerSize(DL);
  Decomposed.VarIndices.clear();
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return false;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking knows about special capturing properties of some
        // intrinsics, like launder.invariant.group, that can't be expressed
        // with attributes but have properties like returning an aliasing
        // pointer. Because some analyses may assume that a nocapture pointer
        // is not returned from a special intrinsic (the function would have
        // to be marked with a "returned" attribute), it is crucial to use
        // this function so we stay in sync with CaptureTracking. Not using it
        // may cause weird miscompilations where two aliasing pointers are
        // assumed not to alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call)) {
          V = RP;
          continue;
        }
      }

      // If it's not a GEP, hand it off to SimplifyInstruction to see if it
      // can come up with something. This matches what GetUnderlyingObject does.
      if (const Instruction *I = dyn_cast<Instruction>(V))
        // TODO: Get a DominatorTree and AssumptionCache and use them here
        // (these are both now available in this function, but this should be
        // updated when GetUnderlyingObject is updated). TLI should be
        // provided also.
        if (const Value *Simplified =
                SimplifyInstruction(const_cast<Instruction *>(I), DL)) {
          V = Simplified;
          continue;
        }

      Decomposed.Base = V;
      return false;
    }

    // Don't attempt to analyze GEPs over unsized objects.
    if (!GEPOp->getSourceElementType()->isSized()) {
      Decomposed.Base = V;
      return false;
    }

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned PointerSize = DL.getPointerSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.StructOffset +=
            DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }

      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;
        Decomposed.OtherOffset +=
            (DL.getTypeAllocSize(GTI.getIndexedType()) *
             CIdx->getValue().sextOrSelf(MaxPointerSize))
                .sextOrTrunc(MaxPointerSize);
        continue;
      }

      GepHasConstantOffset = false;

      APInt Scale(MaxPointerSize, DL.getTypeAllocSize(GTI.getIndexedType()));
      unsigned ZExtBits = 0, SExtBits = 0;

      // If the integer type is smaller than the pointer size, it is implicitly
      // sign extended to pointer size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      if (PointerSize > Width)
        SExtBits += PointerSize - Width;

      // Use GetLinearExpression to decompose the index into a C1*V+C2 form.
      APInt IndexScale(Width, 0), IndexOffset(Width, 0);
      bool NSW = true, NUW = true;
      const Value *OrigIndex = Index;
      Index = GetLinearExpression(Index, IndexScale, IndexOffset, ZExtBits,
                                  SExtBits, DL, 0, AC, DT, NSW, NUW);

      // The GEP index scale ("Scale") scales C1*V+C2, yielding (C1*V+C2)*Scale.
      // This gives us an aggregate computation of (C1*Scale)*V + C2*Scale.
      //
      // It can be the case that, even though C1*V+C2 does not overflow for
      // relevant values of V, (C2*Scale) can overflow. In that case, we cannot
      // decompose the expression in this way.
      //
      // FIXME: C1*Scale and the other operations in the decomposed
      // (C1*Scale)*V+C2*Scale can also overflow. We should check for this
      // possibility.
      APInt WideScaledOffset = IndexOffset.sextOrTrunc(MaxPointerSize*2) *
                               Scale.sext(MaxPointerSize*2);
      if (WideScaledOffset.getMinSignedBits() > MaxPointerSize) {
        Index = OrigIndex;
        IndexScale = 1;
        IndexOffset = 0;

        ZExtBits = SExtBits = 0;
        if (PointerSize > Width)
          SExtBits += PointerSize - Width;
      } else {
        Decomposed.OtherOffset += IndexOffset.sextOrTrunc(MaxPointerSize) * Scale;
        Scale *= IndexScale.sextOrTrunc(MaxPointerSize);
      }

      // If we already had an occurrence of this index variable, merge this
      // scale into it. For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].V == Index &&
            Decomposed.VarIndices[i].ZExtBits == ZExtBits &&
            Decomposed.VarIndices[i].SExtBits == SExtBits) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // pointer size.
      Scale = adjustToPointerSize(Scale, PointerSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {Index, ZExtBits, SExtBits, Scale};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds.
    if (GepHasConstantOffset) {
      Decomposed.StructOffset =
          adjustToPointerSize(Decomposed.StructOffset, PointerSize);
      Decomposed.OtherOffset =
          adjustToPointerSize(Decomposed.OtherOffset, PointerSize);
    }

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return true;
}

/// Returns whether the given pointer value points to memory that is local to
/// the function, with global constants being considered local to all
/// functions.
bool BasicAAResult::pointsToConstantMemory(const MemoryLocation &Loc,
                                           AAQueryInfo &AAQI, bool OrLocal) {
  assert(Visited.empty() && "Visited must be cleared after use!");

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  do {
    const Value *V = GetUnderlyingObject(Worklist.pop_back_val(), DL);
    if (!Visited.insert(V).second) {
      Visited.clear();
      return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
    }

    // An alloca instruction defines local memory.
    if (OrLocal && isa<AllocaInst>(V))
      continue;

    // A global constant counts as local memory for our purposes.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others. GV may even be a declaration, not a definition.
      if (!GV->isConstant()) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup) {
        Visited.clear();
        return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
      }
      for (Value *IncValue : PN->incoming_values())
        Worklist.push_back(IncValue);
      continue;
    }

    // Otherwise be conservative.
    Visited.clear();
    return AAResultBase::pointsToConstantMemory(Loc, AAQI, OrLocal);
  } while (!Worklist.empty() && --MaxLookup);

  Visited.clear();
  return Worklist.empty();
}

/// Returns the behavior when calling the given call site.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const CallBase *Call) {
  if (Call->doesNotAccessMemory())
    // Can't do better than this.
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the callsite knows it only reads memory, don't return worse
  // than that.
  if (Call->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (Call->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (Call->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (Call->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (Call->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  // If the call has operand bundles then aliasing attributes from the function
  // it calls do not directly apply to the call. This can be made more precise
  // in the future.
  if (!Call->hasOperandBundles())
    if (const Function *F = Call->getCalledFunction())
      Min =
          FunctionModRefBehavior(Min & getBestAAResults().getModRefBehavior(F));

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
FunctionModRefBehavior BasicAAResult::getModRefBehavior(const Function *F) {
  // If the function declares it doesn't access memory, we can't do better.
  if (F->doesNotAccessMemory())
    return FMRB_DoesNotAccessMemory;

  FunctionModRefBehavior Min = FMRB_UnknownModRefBehavior;

  // If the function declares it only reads memory, go with that.
  if (F->onlyReadsMemory())
    Min = FMRB_OnlyReadsMemory;
  else if (F->doesNotReadMemory())
    Min = FMRB_DoesNotReadMemory;

  if (F->onlyAccessesArgMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesArgumentPointees);
  else if (F->onlyAccessesInaccessibleMemory())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleMem);
  else if (F->onlyAccessesInaccessibleMemOrArgMem())
    Min = FunctionModRefBehavior(Min & FMRB_OnlyAccessesInaccessibleOrArgMem);

  return Min;
}

/// Returns true if this is a writeonly (i.e., Mod only) parameter.
static bool isWriteOnlyParam(const CallBase *Call, unsigned ArgIdx,
                             const TargetLibraryInfo &TLI) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return true;

  // We can bound the aliasing properties of memset_pattern16 just as we can
  // for memcpy/memset. This is particularly important because the
  // LoopIdiomRecognizer likes to turn loops into calls to memset_pattern16
  // whenever possible.
  // FIXME: Consider handling this in InferFunctionAttr.cpp together with other
  // attributes.
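  // (For reference: on platforms that provide it, such as Darwin, the
  // prototype is "void memset_pattern16(void *b, const void *pattern16,
  // size_t len)", and it writes only through its first argument.)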
  LibFunc F;
  if (Call->getCalledFunction() &&
      TLI.getLibFunc(*Call->getCalledFunction(), F) &&
      F == LibFunc_memset_pattern16 && TLI.has(F))
    if (ArgIdx == 0)
      return true;

  // TODO: memset_pattern4, memset_pattern8
  // TODO: _chk variants
  // TODO: strcmp, strcpy

  return false;
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  // Checking for known builtin intrinsics and target library functions.
  if (isWriteOnlyParam(Call, ArgIdx, TLI))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB,
                                 AAQueryInfo &AAQI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");

  // If we have a directly cached entry for these locations, we have recursed
  // through this once, so just return the cached results. Notably, when this
  // happens, we don't clear the cache.
  auto CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocA, LocB));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  CacheIt = AAQI.AliasCache.find(AAQueryInfo::LocPair(LocB, LocA));
  if (CacheIt != AAQI.AliasCache.end())
    return CacheIt->second;

  AliasResult Alias = aliasCheck(LocA.Ptr, LocA.Size, LocA.AATags, LocB.Ptr,
                                 LocB.Size, LocB.AATags, AAQI);

  VisitedPhiBBs.clear();
  return Alias;
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query. We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = GetUnderlyingObject(Loc.Ptr, DL);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run. However,
  // a tail call may use an alloca with byval. Calling with byval copies the
  // contents of the alloca into argument registers or stack slots, so there is
  // no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      isNonEscapingLocalObject(Object, &AAQI.IsCapturedCache)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;
    bool IsMustAlias = true;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments. If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) &&
           OperandNo < Call->getNumArgOperands() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR = getBestAAResults().alias(MemoryLocation(*CI),
                                                MemoryLocation(Object), AAQI);
      if (AR != MustAlias)
        IsMustAlias = false;
      // Operand doesn't alias 'Object', continue looking for other aliases.
      if (AR == NoAlias)
        continue;
      // Operand aliases 'Object', but the call doesn't modify it. Strengthen
      // the initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result = setRef(Result);
        continue;
      }
      // Operand aliases 'Object' but the call only writes into it.
      if (Call->doesNotReadMemory(OperandNo)) {
        Result = setMod(Result);
        continue;
      }
      // This operand aliases 'Object' and the call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // No operand aliases, reset Must bit. Add below if at least one aliases
    // and all aliases found are MustAlias.
    if (isNoModRef(Result))
      IsMustAlias = false;

    // Early return if we improved mod ref information.
    if (!isModAndRefSet(Result)) {
      if (isNoModRef(Result))
        return ModRefInfo::NoModRef;
      return IsMustAlias ? setMust(Result) : clearMust(Result);
    }
  }

  // If the call is to malloc or calloc, we can assume that it doesn't
  // modify any IR visible value. This is only valid because we assume these
  // routines do not read values visible in the IR. TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well. Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (getBestAAResults().alias(MemoryLocation(Call), Loc, AAQI) == NoAlias)
      return ModRefInfo::NoModRef;
  }

  // The semantics of memcpy intrinsics forbid overlap between their respective
  // operands, i.e., source and destination of any given memcpy must no-alias.
  // If Loc must-aliases either one of these two locations, then it necessarily
  // no-aliases the other.
  if (auto *Inst = dyn_cast<AnyMemCpyInst>(Call)) {
    AliasResult SrcAA, DestAA;

    if ((SrcAA = getBestAAResults().alias(MemoryLocation::getForSource(Inst),
                                          Loc, AAQI)) == MustAlias)
      // Loc is exactly the memcpy source thus disjoint from memcpy dest.
      return ModRefInfo::Ref;
    if ((DestAA = getBestAAResults().alias(MemoryLocation::getForDest(Inst),
                                           Loc, AAQI)) == MustAlias)
      // The converse case.
      return ModRefInfo::Mod;

    // It's also possible for Loc to alias both src and dest, or neither.
    ModRefInfo rv = ModRefInfo::NoModRef;
    if (SrcAA != NoAlias)
      rv = setRef(rv);
    if (DestAA != NoAlias)
      rv = setMod(rv);
    return rv;
  }

  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  if (isIntrinsicCall(Call, Intrinsic::experimental_guard))
    return ModRefInfo::Ref;

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), the invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // While the assume intrinsic is marked as arbitrarily writing so that
  // proper control dependencies will be maintained, it never aliases any
  // particular memory location.
  if (isIntrinsicCall(Call1, Intrinsic::assume) ||
      isIntrinsicCall(Call2, Intrinsic::assume))
    return ModRefInfo::NoModRef;

  // Like assumes, guard intrinsics are also marked as arbitrarily writing so
  // that proper control dependencies are maintained but they never mod any
  // particular memory location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.
  //
  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call2)))
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(createModRefInfo(getModRefBehavior(Call1)))
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Provide ad-hoc rules to disambiguate accesses through two GEP operators,
/// both having the exact same pointer operand.
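///
/// For example, given
///   %f0 = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 0
///   %f1 = getelementptr %struct.S, %struct.S* %p, i64 %i, i32 1
/// the final indices select distinct struct fields, so, provided the access
/// sizes fit within those fields, the two accesses cannot alias.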
static AliasResult aliasSameBasePointerGEPs(const GEPOperator *GEP1,
                                            LocationSize MaybeV1Size,
                                            const GEPOperator *GEP2,
                                            LocationSize MaybeV2Size,
                                            const DataLayout &DL) {
  assert(GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
             GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
         GEP1->getPointerOperandType() == GEP2->getPointerOperandType() &&
         "Expected GEPs with the same pointer operand");

  // Try to determine whether GEP1 and GEP2 index through arrays, into structs,
  // such that the struct field accesses provably cannot alias.
  // We also need at least two indices (the pointer, and the struct field).
  if (GEP1->getNumIndices() != GEP2->getNumIndices() ||
      GEP1->getNumIndices() < 2)
    return MayAlias;

  // If we don't know the size of the accesses through both GEPs, we can't
  // determine whether the struct fields accessed can't alias.
  if (MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return MayAlias;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  ConstantInt *C1 =
      dyn_cast<ConstantInt>(GEP1->getOperand(GEP1->getNumOperands() - 1));
  ConstantInt *C2 =
      dyn_cast<ConstantInt>(GEP2->getOperand(GEP2->getNumOperands() - 1));

  // If the last (struct) indices are constants and are equal, the other indices
  // might also be dynamically equal, so the GEPs can alias.
  if (C1 && C2) {
    unsigned BitWidth = std::max(C1->getBitWidth(), C2->getBitWidth());
    if (C1->getValue().sextOrSelf(BitWidth) ==
        C2->getValue().sextOrSelf(BitWidth))
      return MayAlias;
  }

  // Find the last-indexed type of the GEP, i.e., the type you'd get if
  // you stripped the last index.
  // On the way, look at each indexed type. If there's something other
  // than an array, different indices can lead to different final types.
  SmallVector<Value *, 8> IntermediateIndices;

  // Insert the first index; we don't need to check the type indexed
  // through it as it only drops the pointer indirection.
  assert(GEP1->getNumIndices() > 1 && "Not enough GEP indices to examine");
  IntermediateIndices.push_back(GEP1->getOperand(1));

  // Insert all the remaining indices but the last one.
  // Also, check that they all index through arrays.
  for (unsigned i = 1, e = GEP1->getNumIndices() - 1; i != e; ++i) {
    if (!isa<ArrayType>(GetElementPtrInst::getIndexedType(
            GEP1->getSourceElementType(), IntermediateIndices)))
      return MayAlias;
    IntermediateIndices.push_back(GEP1->getOperand(i + 1));
  }

  auto *Ty = GetElementPtrInst::getIndexedType(
      GEP1->getSourceElementType(), IntermediateIndices);
  StructType *LastIndexedStruct = dyn_cast<StructType>(Ty);

  if (isa<SequentialType>(Ty)) {
    // We know that:
    // - both GEPs begin indexing from the exact same pointer;
    // - the last indices in both GEPs are constants, indexing into a sequential
    //   type (array or pointer);
    // - both GEPs only index through arrays prior to that.
    //
    // Because array indices greater than the number of elements are valid in
    // GEPs, unless we know the intermediate indices are identical between
    // GEP1 and GEP2 we cannot guarantee that the last indexed arrays don't
    // partially overlap. We also need to check that the loaded size matches
    // the element size, otherwise we could still have overlap.
    const uint64_t ElementSize =
        DL.getTypeStoreSize(cast<SequentialType>(Ty)->getElementType());
    if (V1Size != ElementSize || V2Size != ElementSize)
      return MayAlias;

    for (unsigned i = 0, e = GEP1->getNumIndices() - 1; i != e; ++i)
      if (GEP1->getOperand(i + 1) != GEP2->getOperand(i + 1))
        return MayAlias;

    // Now we know that the array/pointer that GEP1 indexes into and the one
    // that GEP2 indexes into must either precisely overlap or be disjoint.
    // Because they cannot partially overlap and because fields in an array
    // cannot overlap, if we can prove the final indices are different between
    // GEP1 and GEP2, we can conclude GEP1 and GEP2 don't alias.
    //
    // If the last indices are constants, we've already checked they don't
    // equal each other so we can exit early.
    if (C1 && C2)
      return NoAlias;

    Value *GEP1LastIdx = GEP1->getOperand(GEP1->getNumOperands() - 1);
    Value *GEP2LastIdx = GEP2->getOperand(GEP2->getNumOperands() - 1);
    if (isa<PHINode>(GEP1LastIdx) || isa<PHINode>(GEP2LastIdx)) {
      // If one of the indices is a PHI node, be safe and only use
      // computeKnownBits so we don't make any assumptions about the
      // relationships between the two indices. This is important if we're
      // asking about values from different loop iterations. See PR32314.
      // TODO: We may be able to change the check so we only do this when
      // we definitely looked through a PHINode.
      if (GEP1LastIdx != GEP2LastIdx &&
          GEP1LastIdx->getType() == GEP2LastIdx->getType()) {
        KnownBits Known1 = computeKnownBits(GEP1LastIdx, DL);
        KnownBits Known2 = computeKnownBits(GEP2LastIdx, DL);
        if (Known1.Zero.intersects(Known2.One) ||
            Known1.One.intersects(Known2.Zero))
          return NoAlias;
      }
    } else if (isKnownNonEqual(GEP1LastIdx, GEP2LastIdx, DL))
      return NoAlias;

    return MayAlias;
  } else if (!LastIndexedStruct || !C1 || !C2) {
    return MayAlias;
  }

  if (C1->getValue().getActiveBits() > 64 ||
      C2->getValue().getActiveBits() > 64)
    return MayAlias;

  // We know that:
  // - both GEPs begin indexing from the exact same pointer;
  // - the last indices in both GEPs are constants, indexing into a struct;
  // - said indices are different, hence, the pointed-to fields are different;
  // - both GEPs only index through arrays prior to that.
  //
  // This lets us determine that the struct that GEP1 indexes into and the
  // struct that GEP2 indexes into must either precisely overlap or be
  // completely disjoint. Because they cannot partially overlap, indexing into
  // different non-overlapping fields of the struct will never alias.
  //
  // Therefore, the only remaining thing needed to show that both GEPs can't
  // alias is that the fields are not overlapping.
  const StructLayout *SL = DL.getStructLayout(LastIndexedStruct);
  const uint64_t StructSize = SL->getSizeInBytes();
  const uint64_t V1Off = SL->getElementOffset(C1->getZExtValue());
  const uint64_t V2Off = SL->getElementOffset(C2->getZExtValue());

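  // Two fields don't overlap if V1's field ends at or before V2's begins;
  // because the enclosing struct may itself be an array element, an access
  // that runs past StructSize may wrap into the next struct instance, so any
  // overhang of V2's access past the struct end must also stop at or before
  // V1Off.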
  auto EltsDontOverlap = [StructSize](uint64_t V1Off, uint64_t V1Size,
                                      uint64_t V2Off, uint64_t V2Size) {
    return V1Off < V2Off && V1Off + V1Size <= V2Off &&
           ((V2Off + V2Size <= StructSize) ||
            (V2Off + V2Size - StructSize <= V1Off));
  };

  if (EltsDontOverlap(V1Off, V1Size, V2Off, V2Size) ||
      EltsDontOverlap(V2Off, V2Size, V1Off, V1Size))
    return NoAlias;

  return MayAlias;
}

// If we have (a) a GEP and (b) a pointer based on an alloca, and the
// beginning of the object the GEP points into would have a negative offset
// with respect to the alloca, that means the GEP cannot alias pointer (b).
// Note that the pointer based on the alloca may not be a GEP. For
// example, it may be the alloca itself.
// The same applies if (b) is based on a GlobalVariable. Note that just being
// based on isIdentifiedObject() is not enough - we need an identified object
// that does not permit access to negative offsets. For example, a negative
// offset from a noalias argument or call can be inbounds w.r.t the actual
// underlying object.
//
// For example, consider:
//
// struct { int f0, int f1, ...} foo;
// foo alloca;
// foo* random = bar(alloca);
// int *f0 = &alloca.f0
// int *f1 = &random->f1;
//
// Which is lowered, approximately, to:
//
// %alloca = alloca %struct.foo
// %random = call %struct.foo* @random(%struct.foo* %alloca)
// %f0 = getelementptr inbounds %struct, %struct.foo* %alloca, i32 0, i32 0
// %f1 = getelementptr inbounds %struct, %struct.foo* %random, i32 0, i32 1
//
// Assume %f1 and %f0 alias. Then %f1 would point into the object allocated
// by %alloca. Since the %f1 GEP is inbounds, that means %random must also
// point into the same object. But since %f0 points to the beginning of %alloca,
// the highest %f1 can be is (%alloca + 3). This means %random cannot be higher
// than (%alloca - 1), and so is not inbounds, a contradiction.
bool BasicAAResult::isGEPBaseAtNegativeOffset(const GEPOperator *GEPOp,
      const DecomposedGEP &DecompGEP, const DecomposedGEP &DecompObject,
      LocationSize MaybeObjectAccessSize) {
  // If the object access size is unknown, or the GEP isn't inbounds, bail.
  if (MaybeObjectAccessSize == LocationSize::unknown() || !GEPOp->isInBounds())
    return false;

  const uint64_t ObjectAccessSize = MaybeObjectAccessSize.getValue();

  // We need the object to be an alloca or a GlobalVariable, and want to know
  // the offset of the pointer from the object precisely, so no variable
  // indices are allowed.
  if (!(isa<AllocaInst>(DecompObject.Base) ||
        isa<GlobalVariable>(DecompObject.Base)) ||
      !DecompObject.VarIndices.empty())
    return false;

  APInt ObjectBaseOffset = DecompObject.StructOffset +
                           DecompObject.OtherOffset;

  // If the GEP has no variable indices, we know the precise offset from the
  // base and can use it. If the GEP has variable indices, we can't determine
  // the exact GEP offset, so return false in that case.
  if (!DecompGEP.VarIndices.empty())
    return false;

  APInt GEPBaseOffset = DecompGEP.StructOffset;
  GEPBaseOffset += DecompGEP.OtherOffset;

  return GEPBaseOffset.sge(ObjectBaseOffset + (int64_t)ObjectAccessSize);
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is GetUnderlyingObject(GEP1, DL), UnderlyingV2 is the same for
/// V2.
1290 AliasResult BasicAAResult::aliasGEP(
1291 const GEPOperator *GEP1, LocationSize V1Size, const AAMDNodes &V1AAInfo,
1292 const Value *V2, LocationSize V2Size, const AAMDNodes &V2AAInfo,
1293 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1294 DecomposedGEP DecompGEP1, DecompGEP2;
1295 unsigned MaxPointerSize = getMaxPointerSize(DL);
1296 DecompGEP1.StructOffset = DecompGEP1.OtherOffset = APInt(MaxPointerSize, 0);
1297 DecompGEP2.StructOffset = DecompGEP2.OtherOffset = APInt(MaxPointerSize, 0);
1299 bool GEP1MaxLookupReached =
1300 DecomposeGEPExpression(GEP1, DecompGEP1, DL, &AC, DT);
1301 bool GEP2MaxLookupReached =
1302 DecomposeGEPExpression(V2, DecompGEP2, DL, &AC, DT);
1304 APInt GEP1BaseOffset = DecompGEP1.StructOffset + DecompGEP1.OtherOffset;
1305 APInt GEP2BaseOffset = DecompGEP2.StructOffset + DecompGEP2.OtherOffset;
1307 assert(DecompGEP1.Base == UnderlyingV1 && DecompGEP2.Base == UnderlyingV2 &&
1308 "DecomposeGEPExpression returned a result different from "
1309 "GetUnderlyingObject");
1311 // If the GEP's offset relative to its base is such that the base would
1312 // fall below the start of the object underlying V2, then the GEP and V2
1313 // cannot alias.
1314 if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1315 isGEPBaseAtNegativeOffset(GEP1, DecompGEP1, DecompGEP2, V2Size))
1316 return NoAlias;
1317 // If we have two gep instructions with must-alias or not-alias'ing base
1318 // pointers, figure out if the indexes to the GEP tell us anything about the
1319 // derived pointer.
1320 if (const GEPOperator *GEP2 = dyn_cast<GEPOperator>(V2)) {
1321 // Check for the GEP base being at a negative offset, this time in the other
1322 // direction.
1323 if (!GEP1MaxLookupReached && !GEP2MaxLookupReached &&
1324 isGEPBaseAtNegativeOffset(GEP2, DecompGEP2, DecompGEP1, V1Size))
1325 return NoAlias;
1326 // Do the base pointers alias?
1327 AliasResult BaseAlias =
1328 aliasCheck(UnderlyingV1, LocationSize::unknown(), AAMDNodes(),
1329 UnderlyingV2, LocationSize::unknown(), AAMDNodes(), AAQI);
1331 // Check for geps of non-aliasing underlying pointers where the offsets are
1332 // identical.
1333 if ((BaseAlias == MayAlias) && V1Size == V2Size) {
1334 // Do the base pointers alias assuming type and size.
1335 AliasResult PreciseBaseAlias = aliasCheck(
1336 UnderlyingV1, V1Size, V1AAInfo, UnderlyingV2, V2Size, V2AAInfo, AAQI);
1337 if (PreciseBaseAlias == NoAlias) {
1338 // See if the computed offset from the common pointer tells us about the
1339 // relation of the resulting pointer.
1340 // If the max search depth is reached the result is undefined
1341 if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1342 return MayAlias;
1344 // Same offsets.
1345 if (GEP1BaseOffset == GEP2BaseOffset &&
1346 DecompGEP1.VarIndices == DecompGEP2.VarIndices)
1347 return NoAlias;
1351 // If we get a No or May, then return it immediately, no amount of analysis
1352 // will improve this situation.
1353 if (BaseAlias != MustAlias) {
1354 assert(BaseAlias == NoAlias || BaseAlias == MayAlias);
1355 return BaseAlias;
1358 // Otherwise, we have a MustAlias. Since the base pointers alias each other
1359 // exactly, see if the computed offset from the common pointer tells us
1360 // about the relation of the resulting pointer.
1361 // If we know the two GEPs are based off of the exact same pointer (and not
1362 // just the same underlying object), see if that tells us anything about
1363 // the resulting pointers.
1364 if (GEP1->getPointerOperand()->stripPointerCastsAndInvariantGroups() ==
1365 GEP2->getPointerOperand()->stripPointerCastsAndInvariantGroups() &&
1366 GEP1->getPointerOperandType() == GEP2->getPointerOperandType()) {
1367 AliasResult R = aliasSameBasePointerGEPs(GEP1, V1Size, GEP2, V2Size, DL);
1368 // If we couldn't find anything interesting, don't abandon just yet.
1369 if (R != MayAlias)
1370 return R;
1373 // If the max search depth is reached, the result is undefined
1374 if (GEP2MaxLookupReached || GEP1MaxLookupReached)
1375 return MayAlias;
1377 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1378 // symbolic difference.
1379 GEP1BaseOffset -= GEP2BaseOffset;
1380 GetIndexDifference(DecompGEP1.VarIndices, DecompGEP2.VarIndices);
1382 } else {
1383 // Check to see if these two pointers are related by the getelementptr
1384 // instruction. If one pointer is a GEP with a non-zero index of the other
1385 // pointer, we know they cannot alias.
1387 // If both accesses are unknown size, we can't do anything useful here.
1388 if (V1Size == LocationSize::unknown() && V2Size == LocationSize::unknown())
1389 return MayAlias;
1391 AliasResult R = aliasCheck(UnderlyingV1, LocationSize::unknown(),
1392 AAMDNodes(), V2, LocationSize::unknown(),
1393 V2AAInfo, AAQI, nullptr, UnderlyingV2);
1394 if (R != MustAlias) {
1395 // If V2 may alias GEP base pointer, conservatively returns MayAlias.
1396 // If V2 is known not to alias GEP base pointer, then the two values
1397 // cannot alias per GEP semantics: "Any memory access must be done through
1398 // a pointer value associated with an address range of the memory access,
1399 // otherwise the behavior is undefined.".
1400 assert(R == NoAlias || R == MayAlias);
1401 return R;
1404 // If the max search depth is reached the result is undefined
1405 if (GEP1MaxLookupReached)
1406 return MayAlias;

  // In the two-GEP case, if there is no difference in the offsets of the
  // computed pointers, the resulting pointers are a must alias. This
  // happens when we have two lexically identical GEPs (for example).
  //
  // In the other case, if we have getelementptr <ptr>, 0, 0, 0, 0, ... and V2
  // must-aliases the GEP, the end result is a must alias also.
  if (GEP1BaseOffset == 0 && DecompGEP1.VarIndices.empty())
    return MustAlias;

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping. If the difference is
  // greater, we know they do not overlap.
  if (GEP1BaseOffset != 0 && DecompGEP1.VarIndices.empty()) {
    if (GEP1BaseOffset.sge(0)) {
      if (V2Size != LocationSize::unknown()) {
        if (GEP1BaseOffset.ult(V2Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    } else {
      // We have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      // We need to know that V2Size is not unknown, otherwise we might have
      // stripped a gep with a negative index ('gep <ptr>, -1, ...').
      if (V1Size != LocationSize::unknown() &&
          V2Size != LocationSize::unknown()) {
        if ((-GEP1BaseOffset).ult(V1Size.getValue()))
          return PartialAlias;
        return NoAlias;
      }
    }
  }
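
  // Worked example for the check above: with GEP1BaseOffset == 4 and an
  // 8-byte access through V2, the byte ranges [4, 4 + V1Size) and [0, 8)
  // overlap, giving PartialAlias; with GEP1BaseOffset == 8 they are disjoint,
  // giving NoAlias.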

  if (!DecompGEP1.VarIndices.empty()) {
    APInt Modulo(MaxPointerSize, 0);
    bool AllPositive = true;
    for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {

      // Try to distinguish something like &A[i][1] against &A[42][0].
      // Grab the least significant bit set in any of the scales. We
      // don't need std::abs here (even if the scale's negative) as we'll
      // be ^'ing Modulo with itself later.
      Modulo |= DecompGEP1.VarIndices[i].Scale;

      if (AllPositive) {
        // If the Value could change between cycles, then any reasoning about
        // the Value this cycle may not hold in the next cycle. We'll just
        // give up if we can't determine conditions that hold for every cycle:
        const Value *V = DecompGEP1.VarIndices[i].V;

        KnownBits Known = computeKnownBits(V, DL, 0, &AC, nullptr, DT);
        bool SignKnownZero = Known.isNonNegative();
        bool SignKnownOne = Known.isNegative();

        // Zero-extension widens the variable, and so forces the sign
        // bit to zero.
        bool IsZExt = DecompGEP1.VarIndices[i].ZExtBits > 0 || isa<ZExtInst>(V);
        SignKnownZero |= IsZExt;
        SignKnownOne &= !IsZExt;

        // If the variable begins with a zero then we know it's
        // positive, regardless of whether the value is signed or
        // unsigned.
        APInt Scale = DecompGEP1.VarIndices[i].Scale;
        AllPositive =
            (SignKnownZero && Scale.sge(0)) || (SignKnownOne && Scale.slt(0));
      }
    }

    // Keep only the least significant set bit of Modulo: every scale is a
    // multiple of it, so every address difference is too.
    Modulo = Modulo ^ (Modulo & (Modulo - 1));

    // We can compute the difference between the two addresses
    // mod Modulo. Check whether that difference guarantees that the
    // two locations do not alias.
    APInt ModOffset = GEP1BaseOffset & (Modulo - 1);
    if (V1Size != LocationSize::unknown() &&
        V2Size != LocationSize::unknown() && ModOffset.uge(V2Size.getValue()) &&
        (Modulo - ModOffset).uge(V1Size.getValue()))
      return NoAlias;
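    // Worked example (illustrative): for i32 accesses to &A[2*i] and
    // &A[2*j + 1] in an i32 array, all scales are 8, so Modulo == 8 and
    // ModOffset == 4. Since ModOffset >= V2Size (4) and Modulo - ModOffset >=
    // V1Size (4), the accesses land in disjoint residues mod 8 and cannot
    // alias.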

    // If we know all the variables are positive, then GEP1 >= GEP1BasePtr.
    // If GEP1BasePtr > V2 (GEP1BaseOffset > 0) then we know the pointers
    // don't alias if V2Size can fit in the gap between V2 and GEP1BasePtr.
    if (AllPositive && GEP1BaseOffset.sgt(0) &&
        V2Size != LocationSize::unknown() &&
        GEP1BaseOffset.uge(V2Size.getValue()))
      return NoAlias;

    if (constantOffsetHeuristic(DecompGEP1.VarIndices, V1Size, V2Size,
                                GEP1BaseOffset, &AC, DT))
      return NoAlias;
  }

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == PartialAlias && B == MustAlias) ||
      (B == PartialAlias && A == MustAlias))
    return PartialAlias;
  // Otherwise, we don't know anything.
  return MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const AAMDNodes &SIAAInfo, const Value *V2,
                           LocationSize V2Size, const AAMDNodes &V2AAInfo,
                           const Value *UnderV2, AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on the
  // corresponding arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (SI->getCondition() == SI2->getCondition()) {
      AliasResult Alias =
          aliasCheck(SI->getTrueValue(), SISize, SIAAInfo, SI2->getTrueValue(),
                     V2Size, V2AAInfo, AAQI);
      if (Alias == MayAlias)
        return MayAlias;
      AliasResult ThisAlias =
          aliasCheck(SI->getFalseValue(), SISize, SIAAInfo,
                     SI2->getFalseValue(), V2Size, V2AAInfo, AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }
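
  // For illustration, with
  //   %p = select i1 %c, i32* %a, i32* %x
  //   %q = select i1 %c, i32* %b, i32* %y
  // only the pairs (%a, %b) and (%x, %y) can be selected together; the cross
  // pairs (%a, %y) and (%b, %x) never are, so they need not be checked.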

  // If both arms of the Select node NoAlias or MustAlias V2, then we can
  // return NoAlias / MustAlias. Otherwise, we return MayAlias.
  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, SI->getTrueValue(),
                                 SISize, SIAAInfo, AAQI, UnderV2);
  if (Alias == MayAlias)
    return MayAlias;

  AliasResult ThisAlias = aliasCheck(V2, V2Size, V2AAInfo, SI->getFalseValue(),
                                     SISize, SIAAInfo, AAQI, UnderV2);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const AAMDNodes &PNAAInfo, const Value *V2,
                                    LocationSize V2Size,
                                    const AAMDNodes &V2AAInfo,
                                    const Value *UnderV2, AAQueryInfo &AAQI) {
  // Track phi nodes we have visited. We use this information when we determine
  // value equivalence.
  VisitedPhiBBs.insert(PN->getParent());

  // If the values are PHIs in the same block, we can do a more precise as
  // well as more efficient check: just check for aliases between the values
  // on the corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      AAQueryInfo::LocPair Locs(MemoryLocation(PN, PNSize, PNAAInfo),
                                MemoryLocation(V2, V2Size, V2AAInfo));
      if (PN > V2)
        std::swap(Locs.first, Locs.second);
      // Analyse the PHIs' inputs under the assumption that the PHIs are
      // NoAlias.
      // If the PHIs are May/MustAlias there must be (recursively) an input
      // operand from outside the PHIs' cycle that is MayAlias/MustAlias or
      // there must be an operation on the PHIs within the PHIs' value cycle
      // that causes a MayAlias.
      // Pretend the phis do not alias.
      AliasResult Alias = NoAlias;
      AliasResult OrigAliasResult;
      {
        // Limited lifetime iterator, invalidated by the aliasCheck call below.
        auto CacheIt = AAQI.AliasCache.find(Locs);
        assert((CacheIt != AAQI.AliasCache.end()) &&
               "There must exist an entry for the phi node");
        OrigAliasResult = CacheIt->second;
        CacheIt->second = NoAlias;
      }

      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias =
            aliasCheck(PN->getIncomingValue(i), PNSize, PNAAInfo,
                       PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)),
                       V2Size, V2AAInfo, AAQI);
        Alias = MergeAliasResults(ThisAlias, Alias);
        if (Alias == MayAlias)
          break;
      }

      // Reset if speculation failed.
      if (Alias != NoAlias) {
        auto Pair =
            AAQI.AliasCache.insert(std::make_pair(Locs, OrigAliasResult));
        assert(!Pair.second && "Entry must have existed");
        Pair.first->second = OrigAliasResult;
      }
      return Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  bool isRecursive = false;
  if (PV) {
    // If we have PhiValues then use it to get the underlying phi values.
    const PhiValues::ValueSet &PhiValueSet = PV->getValuesForPhi(PN);
    // If we have more phi values than the search depth then return MayAlias
    // conservatively to avoid a compile-time explosion. The worst possible
    // case is if both sides are PHI nodes, in which case this is O(m x n)
    // time, where 'm' and 'n' are the number of PHI sources.
    if (PhiValueSet.size() > MaxLookupSearchDepth)
      return MayAlias;
    // Add the values to V1Srcs.
    for (Value *PV1 : PhiValueSet) {
      if (EnableRecPhiAnalysis) {
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }
      }
      V1Srcs.push_back(PV1);
    }
  } else {
    // If we don't have PhiValues then just look at the operands of the phi
    // itself.
    // FIXME: Remove this once we can guarantee that we have PhiValues always.
    SmallPtrSet<Value *, 4> UniqueSrc;
    for (Value *PV1 : PN->incoming_values()) {
      if (isa<PHINode>(PV1))
        // If any source itself is a PHI, return MayAlias conservatively to
        // avoid a compile-time explosion. The worst possible case is if both
        // sides are PHI nodes, in which case this is O(m x n) time, where 'm'
        // and 'n' are the number of PHI sources.
        return MayAlias;

      if (EnableRecPhiAnalysis)
        if (GEPOperator *PV1GEP = dyn_cast<GEPOperator>(PV1)) {
          // Check whether the incoming value is a GEP that advances the
          // pointer result of this PHI node (e.g. in a loop). If this is the
          // case, we would recurse and always get a MayAlias. Handle this
          // case specially below.
          if (PV1GEP->getPointerOperand() == PN &&
              PV1GEP->getNumIndices() == 1 &&
              isa<ConstantInt>(PV1GEP->idx_begin())) {
            isRecursive = true;
            continue;
          }
        }

      if (UniqueSrc.insert(PV1).second)
        V1Srcs.push_back(PV1);
    }
  }

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return MayAlias;

  // If this PHI node is recursive, set the size of the accessed memory to
  // unknown to represent all the possible values the GEP could advance the
  // pointer to.
  if (isRecursive)
    PNSize = LocationSize::unknown();
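  // Illustrative shape of the recursive-PHI case recorded above:
  //   loop:
  //     %p = phi i8* [ %base, %entry ], [ %p.next, %loop ]
  //     %p.next = getelementptr i8, i8* %p, i64 1
  // %p may point anywhere %base is advanced to across iterations, which the
  // unknown size conservatively covers.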

  AliasResult Alias = aliasCheck(V2, V2Size, V2AAInfo, V1Srcs[0], PNSize,
                                 PNAAInfo, AAQI, UnderV2);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == MayAlias)
    return MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then we return
  // NoAlias / MustAlias. Otherwise, we return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias =
        aliasCheck(V2, V2Size, V2AAInfo, V, PNSize, PNAAInfo, AAQI, UnderV2);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      AAMDNodes V1AAInfo, const Value *V2,
                                      LocationSize V2Size, AAMDNodes V2AAInfo,
                                      AAQueryInfo &AAQI, const Value *O1,
                                      const Value *O2) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsAndInvariantGroups();
  V2 = V2->stripPointerCastsAndInvariantGroups();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2))
    return MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return NoAlias; // Scalars cannot alias each other.

  // Figure out what objects these things are pointing to if we can.
  if (O1 == nullptr)
    O1 = GetUnderlyingObject(V1, DL, MaxLookupSearchDepth);

  if (O2 == nullptr)
    O2 = GetUnderlyingObject(V2, DL, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        isNonEscapingLocalObject(O2, &AAQI.IsCapturedCache))
      return NoAlias;
    if (isEscapeSource(O2) &&
        isNonEscapingLocalObject(O1, &AAQI.IsCapturedCache))
      return NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((V1Size.isPrecise() && isObjectSmallerThan(O2, V1Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)) ||
      (V2Size.isPrecise() && isObjectSmallerThan(O1, V2Size.getValue(), DL, TLI,
                                                 NullIsValidLocation)))
    return NoAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries.
  AAQueryInfo::LocPair Locs(MemoryLocation(V1, V1Size, V1AAInfo),
                            MemoryLocation(V2, V2Size, V2AAInfo));
  if (V1 > V2)
    std::swap(Locs.first, Locs.second);
  std::pair<AAQueryInfo::AliasCacheT::iterator, bool> Pair =
      AAQI.AliasCache.try_emplace(Locs, MayAlias);
  if (!Pair.second)
    return Pair.first->second;
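  // Note that the MayAlias entry seeded by try_emplace doubles as the
  // recursion terminator: a query that cycles back to this (V1, V2) pair hits
  // the cache and conservatively sees MayAlias until a more precise result is
  // written back below.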

  // FIXME: This isn't aggressively handling alias(GEP, PHI), for example: if
  // the GEP can't simplify, we don't even look at the PHI cases.
  if (!isa<GEPOperator>(V1) && isa<GEPOperator>(V2)) {
    std::swap(V1, V2);
    std::swap(V1Size, V2Size);
    std::swap(O1, O2);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result =
        aliasGEP(GV1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O1, O2, AAQI);
    if (Result != MayAlias) {
      auto ItInsPair = AAQI.AliasCache.insert(std::make_pair(Locs, Result));
      assert(!ItInsPair.second && "Entry must have existed");
      ItInsPair.first->second = Result;
      return Result;
    }
  }

  if (isa<PHINode>(V2) && !isa<PHINode>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result =
        aliasPHI(PN, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  if (isa<SelectInst>(V2) && !isa<SelectInst>(V1)) {
    std::swap(V1, V2);
    std::swap(O1, O2);
    std::swap(V1Size, V2Size);
    std::swap(V1AAInfo, V2AAInfo);
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result =
        aliasSelect(S1, V1Size, V1AAInfo, V2, V2Size, V2AAInfo, O2, AAQI);
    if (Result != MayAlias) {
      Pair = AAQI.AliasCache.try_emplace(Locs, Result);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = Result;
    }
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2)
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation))) {
      Pair = AAQI.AliasCache.try_emplace(Locs, PartialAlias);
      assert(!Pair.second && "Entry must have existed");
      return Pair.first->second = PartialAlias;
    }

  // Recurse back into the best AA results we have, potentially with refined
  // memory locations. We have already ensured that BasicAA has a MayAlias
  // cache result for these, so any recursion back into BasicAA won't loop.
  AliasResult Result = getBestAAResults().alias(Locs.first, Locs.second, AAQI);
  Pair = AAQI.AliasCache.try_emplace(Locs, Result);
  assert(!Pair.second && "Entry must have existed");
  return Pair.first->second = Result;
}

/// Check whether two Values can be considered equivalent.
///
/// In addition to pointer equivalence of \p V and \p V2 this checks whether
/// they can not be part of a cycle in the value graph by looking at all
/// visited phi nodes and making sure that the phis cannot reach the value. We
/// have to do this because we are looking through phi nodes (that is, we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB)).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2) {
  if (V != V2)
    return false;

  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst)
    return true;

  if (VisitedPhiBBs.empty())
    return true;

  if (VisitedPhiBBs.size() > MaxNumPhiBBsValueReachabilityCheck)
    return false;

  // Make sure that the visited phis cannot reach the Value. This ensures that
  // the Values cannot come from different iterations of a potential cycle the
  // phi nodes could be involved in.
  for (auto *P : VisitedPhiBBs)
    if (isPotentiallyReachable(&P->front(), Inst, nullptr, DT, LI))
      return false;

  return true;
}

/// Computes the symbolic difference between two de-composed GEPs.
///
/// Dest and Src are the variable indices from two decomposed GetElementPtr
/// instructions GEP1 and GEP2 which have common base pointers.
void BasicAAResult::GetIndexDifference(
    SmallVectorImpl<VariableGEPIndex> &Dest,
    const SmallVectorImpl<VariableGEPIndex> &Src) {
  if (Src.empty())
    return;

  for (unsigned i = 0, e = Src.size(); i != e; ++i) {
    const Value *V = Src[i].V;
    unsigned ZExtBits = Src[i].ZExtBits, SExtBits = Src[i].SExtBits;
    APInt Scale = Src[i].Scale;

    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    for (unsigned j = 0, e = Dest.size(); j != e; ++j) {
      if (!isValueEqualInPotentialCycles(Dest[j].V, V) ||
          Dest[j].ZExtBits != ZExtBits || Dest[j].SExtBits != SExtBits)
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest[j].Scale != Scale)
        Dest[j].Scale -= Scale;
      else
        Dest.erase(Dest.begin() + j);
      Scale = 0;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!!Scale) {
      VariableGEPIndex Entry = {V, ZExtBits, SExtBits, -Scale};
      Dest.push_back(Entry);
    }
  }
}
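
// Illustrative run of GetIndexDifference: Dest = {8*%i}, Src = {8*%i, 4*%j}
// leaves Dest = {-4*%j}; the matching 8*%i terms cancel exactly, and the
// unmatched 4*%j entry is appended with its scale negated.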

bool BasicAAResult::constantOffsetHeuristic(
    const SmallVectorImpl<VariableGEPIndex> &VarIndices,
    LocationSize MaybeV1Size, LocationSize MaybeV2Size, APInt BaseOffset,
    AssumptionCache *AC, DominatorTree *DT) {
  if (VarIndices.size() != 2 || MaybeV1Size == LocationSize::unknown() ||
      MaybeV2Size == LocationSize::unknown())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = VarIndices[0], &Var1 = VarIndices[1];

  if (Var0.ZExtBits != Var1.ZExtBits || Var0.SExtBits != Var1.SExtBits ||
      Var0.Scale != -Var1.Scale)
    return false;

  unsigned Width = Var1.V->getType()->getIntegerBitWidth();

  // We'll strip off the extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. For example, if Var0 is
  // zext(%x + 1) we should get V0 == %x and V0Offset == 1.

  APInt V0Scale(Width, 0), V0Offset(Width, 0), V1Scale(Width, 0),
      V1Offset(Width, 0);
  bool NSW = true, NUW = true;
  unsigned V0ZExtBits = 0, V0SExtBits = 0, V1ZExtBits = 0, V1SExtBits = 0;
  const Value *V0 = GetLinearExpression(Var0.V, V0Scale, V0Offset, V0ZExtBits,
                                        V0SExtBits, DL, 0, AC, DT, NSW, NUW);
  NSW = true;
  NUW = true;
  const Value *V1 = GetLinearExpression(Var1.V, V1Scale, V1Offset, V1ZExtBits,
                                        V1SExtBits, DL, 0, AC, DT, NSW, NUW);

  if (V0Scale != V1Scale || V0ZExtBits != V1ZExtBits ||
      V0SExtBits != V1SExtBits || !isValueEqualInPotentialCycles(V0, V1))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and
  // so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = V0Offset - V1Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + BaseOffset.abs()) &&
         MinDiffBytes.uge(V2Size + BaseOffset.abs());
}
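
// Illustrative query the heuristic above can resolve: with
//   %a1 = getelementptr i8, i8* %base, i64 %n
//   %a2 = getelementptr i8, i8* %base, i64 %n.plus1  ; %n.plus1 = add i64 %n, 1
// both variable parts strip down to %n, MinDiffBytes == 1, and two 1-byte
// accesses (BaseOffset == 0) are therefore NoAlias.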

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  return BasicAAResult(F.getParent()->getDataLayout(),
                       F,
                       AM.getResult<TargetLibraryAnalysis>(F),
                       AM.getResult<AssumptionAnalysis>(F),
                       &AM.getResult<DominatorTreeAnalysis>(F),
                       AM.getCachedResult<LoopAnalysis>(F),
                       AM.getCachedResult<PhiValuesAnalysis>(F));
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basicaa",
                      "Basic Alias Analysis (stateless AA impl)", false, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basicaa",
                    "Basic Alias Analysis (stateless AA impl)", false, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();
  auto *LIWP = getAnalysisIfAvailable<LoopInfoWrapperPass>();
  auto *PVWP = getAnalysisIfAvailable<PhiValuesWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F, TLIWP.getTLI(),
                                 ACT.getAssumptionCache(F), &DTWP.getDomTree(),
                                 LIWP ? &LIWP->getLoopInfo() : nullptr,
                                 PVWP ? &PVWP->getResult() : nullptr));

  return false;
}
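
// LoopInfo and PhiValues are deliberately optional here: BasicAA only
// consults them when some earlier pass has already computed them, and it
// remains correct (just less precise) without them.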

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequired<AssumptionCacheTracker>();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addUsedIfAvailable<PhiValuesWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(),
      F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}