1 //===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file defines the primary stateless implementation of the
10 // Alias Analysis interface that implements identities (two different
11 // globals cannot alias, etc), but does no stateful analysis.
13 //===----------------------------------------------------------------------===//
15 #include "llvm/Analysis/BasicAliasAnalysis.h"
16 #include "llvm/ADT/APInt.h"
17 #include "llvm/ADT/ScopeExit.h"
18 #include "llvm/ADT/SmallPtrSet.h"
19 #include "llvm/ADT/SmallVector.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/Analysis/AliasAnalysis.h"
22 #include "llvm/Analysis/AssumptionCache.h"
23 #include "llvm/Analysis/CFG.h"
24 #include "llvm/Analysis/CaptureTracking.h"
25 #include "llvm/Analysis/MemoryBuiltins.h"
26 #include "llvm/Analysis/MemoryLocation.h"
27 #include "llvm/Analysis/TargetLibraryInfo.h"
28 #include "llvm/Analysis/ValueTracking.h"
29 #include "llvm/IR/Argument.h"
30 #include "llvm/IR/Attributes.h"
31 #include "llvm/IR/Constant.h"
32 #include "llvm/IR/ConstantRange.h"
33 #include "llvm/IR/Constants.h"
34 #include "llvm/IR/DataLayout.h"
35 #include "llvm/IR/DerivedTypes.h"
36 #include "llvm/IR/Dominators.h"
37 #include "llvm/IR/Function.h"
38 #include "llvm/IR/GetElementPtrTypeIterator.h"
39 #include "llvm/IR/GlobalAlias.h"
40 #include "llvm/IR/GlobalVariable.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/Operator.h"
47 #include "llvm/IR/PatternMatch.h"
48 #include "llvm/IR/Type.h"
49 #include "llvm/IR/User.h"
50 #include "llvm/IR/Value.h"
51 #include "llvm/InitializePasses.h"
52 #include "llvm/Pass.h"
53 #include "llvm/Support/Casting.h"
54 #include "llvm/Support/CommandLine.h"
55 #include "llvm/Support/Compiler.h"
56 #include "llvm/Support/KnownBits.h"
57 #include "llvm/Support/SaveAndRestore.h"
58 #include <cassert>
59 #include <cstdint>
60 #include <cstdlib>
61 #include <optional>
62 #include <utility>
64 #define DEBUG_TYPE "basicaa"
66 using namespace llvm;
68 /// Enable analysis of recursive PHI nodes.
69 static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
70 cl::init(true));
72 static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
73 cl::Hidden, cl::init(true));
75 /// SearchLimitReached / SearchTimes shows how often the limit to
76 /// decompose GEPs is reached. It will affect the precision
77 /// of basic alias analysis.
78 STATISTIC(SearchLimitReached, "Number of times the limit to "
79 "decompose GEPs is reached");
80 STATISTIC(SearchTimes, "Number of times a GEP is decomposed");
82 // The max limit of the search depth in DecomposeGEPExpression() and
83 // getUnderlyingObject().
84 static const unsigned MaxLookupSearchDepth = 6;
86 bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
87 FunctionAnalysisManager::Invalidator &Inv) {
88 // We don't care if this analysis itself is preserved, it has no state. But
89 // we need to check that the analyses it depends on have been. Note that we
90 // may be created without handles to some analyses and in that case don't
91 // depend on them.
92 if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
93 (DT_ && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
94 return true;
96 // Otherwise this analysis result remains valid.
97 return false;
100 //===----------------------------------------------------------------------===//
101 // Useful predicates
102 //===----------------------------------------------------------------------===//
104 /// Returns the size of the object specified by V, or std::nullopt if unknown.
105 static std::optional<TypeSize> getObjectSize(const Value *V,
106 const DataLayout &DL,
107 const TargetLibraryInfo &TLI,
108 bool NullIsValidLoc,
109 bool RoundToAlign = false) {
110 uint64_t Size;
111 ObjectSizeOpts Opts;
112 Opts.RoundToAlign = RoundToAlign;
113 Opts.NullIsUnknownSize = NullIsValidLoc;
114 if (getObjectSize(V, Size, DL, &TLI, Opts))
115 return TypeSize::getFixed(Size);
116 return std::nullopt;
119 /// Returns true if we can prove that the object specified by V is smaller than
120 /// Size. Bails out early unless the root object is passed as the first
121 /// parameter.
122 static bool isObjectSmallerThan(const Value *V, TypeSize Size,
123 const DataLayout &DL,
124 const TargetLibraryInfo &TLI,
125 bool NullIsValidLoc) {
126 // Note that the meanings of the "object" are slightly different in the
127 // following contexts:
128 // c1: llvm::getObjectSize()
129 // c2: llvm.objectsize() intrinsic
130 // c3: isObjectSmallerThan()
131 // c1 and c2 share the same meaning; however, the meaning of "object" in c3
132 // refers to the "entire object".
134 // Consider this example:
135 // char *p = (char*)malloc(100)
136 // char *q = p+80;
138 // In the context of c1 and c2, the "object" pointed by q refers to the
139 // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
141 // In the context of c3, the "object" refers to the chunk of memory being
142 // allocated. So, the "object" has 100 bytes, and q points to the middle of the
143 // "object". However, unless p, the root object, is passed as the first
144 // parameter, the call to isIdentifiedObject() makes isObjectSmallerThan()
145 // bail out early.
146 if (!isIdentifiedObject(V))
147 return false;
149 // This function needs to use the aligned object size because we allow
150 // reads a bit past the end given sufficient alignment.
151 std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
152 /*RoundToAlign*/ true);
154 return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
157 /// Return the minimal extent from \p V to the end of the underlying object,
158 /// assuming the result is used in an aliasing query. E.g., we do use the query
159 /// location size and the fact that null pointers cannot alias here.
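/// For illustration (a hypothetical case): for an argument declared
/// "ptr dereferenceable(16) %p", queried with a precise 4-byte location and
/// with null not being a valid location, the minimal extent is
/// max(16, 4) = 16 bytes.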
160 static TypeSize getMinimalExtentFrom(const Value &V,
161 const LocationSize &LocSize,
162 const DataLayout &DL,
163 bool NullIsValidLoc) {
164 // If we have dereferenceability information we know a lower bound for the
165 // extent as accesses for a lower offset would be valid. We need to exclude
166 // the "or null" part if null is a valid pointer. We can ignore frees, as an
167 // access after free would be undefined behavior.
168 bool CanBeNull, CanBeFreed;
169 uint64_t DerefBytes =
170 V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
171 DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
172 // If queried with a precise location size, we assume that location size is
173 // accessed, and thus valid.
174 if (LocSize.isPrecise())
175 DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
176 return TypeSize::getFixed(DerefBytes);
179 /// Returns true if we can prove that the object specified by V has size Size.
180 static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
181 const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
182 std::optional<TypeSize> ObjectSize =
183 getObjectSize(V, DL, TLI, NullIsValidLoc);
184 return ObjectSize && *ObjectSize == Size;
187 /// Return true if both V1 and V2 are VScale
188 static bool areBothVScale(const Value *V1, const Value *V2) {
189 return PatternMatch::match(V1, PatternMatch::m_VScale()) &&
190 PatternMatch::match(V2, PatternMatch::m_VScale());
193 //===----------------------------------------------------------------------===//
194 // CaptureAnalysis implementations
195 //===----------------------------------------------------------------------===//
197 CaptureAnalysis::~CaptureAnalysis() = default;
199 bool SimpleCaptureAnalysis::isNotCapturedBefore(const Value *Object,
200 const Instruction *I,
201 bool OrAt) {
202 return isNonEscapingLocalObject(Object, &IsCapturedCache);
205 static bool isNotInCycle(const Instruction *I, const DominatorTree *DT,
206 const LoopInfo *LI) {
207 BasicBlock *BB = const_cast<BasicBlock *>(I->getParent());
208 SmallVector<BasicBlock *> Succs(successors(BB));
209 return Succs.empty() ||
210 !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT, LI);
213 bool EarliestEscapeAnalysis::isNotCapturedBefore(const Value *Object,
214 const Instruction *I,
215 bool OrAt) {
216 if (!isIdentifiedFunctionLocal(Object))
217 return false;
219 auto Iter = EarliestEscapes.insert({Object, nullptr});
220 if (Iter.second) {
221 Instruction *EarliestCapture = FindEarliestCapture(
222 Object, *const_cast<Function *>(DT.getRoot()->getParent()),
223 /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT);
224 if (EarliestCapture)
225 Inst2Obj[EarliestCapture].push_back(Object);
226 Iter.first->second = EarliestCapture;
229 // No capturing instruction.
230 if (!Iter.first->second)
231 return true;
233 // No context instruction means any use is capturing.
234 if (!I)
235 return false;
237 if (I == Iter.first->second) {
238 if (OrAt)
239 return false;
240 return isNotInCycle(I, &DT, LI);
243 return !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
246 void EarliestEscapeAnalysis::removeInstruction(Instruction *I) {
247 auto Iter = Inst2Obj.find(I);
248 if (Iter != Inst2Obj.end()) {
249 for (const Value *Obj : Iter->second)
250 EarliestEscapes.erase(Obj);
251 Inst2Obj.erase(I);
255 //===----------------------------------------------------------------------===//
256 // GetElementPtr Instruction Decomposition and Analysis
257 //===----------------------------------------------------------------------===//
259 namespace {
260 /// Represents zext(sext(trunc(V))).
261 struct CastedValue {
262 const Value *V;
263 unsigned ZExtBits = 0;
264 unsigned SExtBits = 0;
265 unsigned TruncBits = 0;
266 /// Whether trunc(V) is non-negative.
267 bool IsNonNegative = false;
269 explicit CastedValue(const Value *V) : V(V) {}
270 explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
271 unsigned TruncBits, bool IsNonNegative)
272 : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits),
273 IsNonNegative(IsNonNegative) {}
275 unsigned getBitWidth() const {
276 return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
277 SExtBits;
280 CastedValue withValue(const Value *NewV, bool PreserveNonNeg) const {
281 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits,
282 IsNonNegative && PreserveNonNeg);
285 /// Replace V with zext(NewV)
286 CastedValue withZExtOfValue(const Value *NewV, bool ZExtNonNegative) const {
287 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
288 NewV->getType()->getPrimitiveSizeInBits();
289 if (ExtendBy <= TruncBits)
290 // zext<nneg>(trunc(zext(NewV))) == zext<nneg>(trunc(NewV))
291 // The nneg can be preserved on the outer zext here.
292 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
293 IsNonNegative);
295 // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
296 ExtendBy -= TruncBits;
297 // zext<nneg>(zext(NewV)) == zext(NewV)
298 // zext(zext<nneg>(NewV)) == zext<nneg>(NewV)
299 // The nneg can be preserved from the inner zext here but must be dropped
300 // from the outer.
301 return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0,
302 ZExtNonNegative);
305 /// Replace V with sext(NewV)
306 CastedValue withSExtOfValue(const Value *NewV) const {
307 unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
308 NewV->getType()->getPrimitiveSizeInBits();
309 if (ExtendBy <= TruncBits)
310 // zext<nneg>(trunc(sext(NewV))) == zext<nneg>(trunc(NewV))
311 // The nneg can be preserved on the outer zext here
312 return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy,
313 IsNonNegative);
315 // zext(sext(sext(NewV)))
316 ExtendBy -= TruncBits;
317 // zext<nneg>(sext(sext(NewV))) = zext<nneg>(sext(NewV))
318 // The nneg can be preserved on the outer zext here
319 return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0, IsNonNegative);
322 APInt evaluateWith(APInt N) const {
323 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
324 "Incompatible bit width");
325 if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
326 if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
327 if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
328 return N;
331 ConstantRange evaluateWith(ConstantRange N) const {
332 assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
333 "Incompatible bit width");
334 if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
335 if (IsNonNegative && !N.isAllNonNegative())
336 N = N.intersectWith(
337 ConstantRange(APInt::getZero(N.getBitWidth()),
338 APInt::getSignedMinValue(N.getBitWidth())));
339 if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
340 if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
341 return N;
344 bool canDistributeOver(bool NUW, bool NSW) const {
345 // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
346 // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
347 // trunc(x op y) == trunc(x) op trunc(y)
348 return (!ZExtBits || NUW) && (!SExtBits || NSW);
351 bool hasSameCastsAs(const CastedValue &Other) const {
352 if (V->getType() != Other.V->getType())
353 return false;
355 if (ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
356 TruncBits == Other.TruncBits)
357 return true;
358 // If either CastedValue has a nneg zext then the sext/zext bits are
359 // interchangeable for that value.
360 if (IsNonNegative || Other.IsNonNegative)
361 return (ZExtBits + SExtBits == Other.ZExtBits + Other.SExtBits &&
362 TruncBits == Other.TruncBits);
363 return false;
367 /// Represents zext(sext(trunc(V))) * Scale + Offset.
368 struct LinearExpression {
369 CastedValue Val;
370 APInt Scale;
371 APInt Offset;
373 /// True if all operations in this expression are NUW.
374 bool IsNUW;
375 /// True if all operations in this expression are NSW.
376 bool IsNSW;
378 LinearExpression(const CastedValue &Val, const APInt &Scale,
379 const APInt &Offset, bool IsNUW, bool IsNSW)
380 : Val(Val), Scale(Scale), Offset(Offset), IsNUW(IsNUW), IsNSW(IsNSW) {}
382 LinearExpression(const CastedValue &Val)
383 : Val(Val), IsNUW(true), IsNSW(true) {
384 unsigned BitWidth = Val.getBitWidth();
385 Scale = APInt(BitWidth, 1);
386 Offset = APInt(BitWidth, 0);
389 LinearExpression mul(const APInt &Other, bool MulIsNUW, bool MulIsNSW) const {
390 // The check for zero offset is necessary, because generally
391 // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
392 bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
393 bool NUW = IsNUW && (Other.isOne() || MulIsNUW);
394 return LinearExpression(Val, Scale * Other, Offset * Other, NUW, NSW);
399 /// Analyzes the specified value as a linear expression: "A*V + B", where A and
400 /// B are constant integers.
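/// For illustration (hypothetical IR): analyzing an index defined as
///   %a = add nsw i32 %x, 4
///   %i = sext i32 %a to i64
/// yields roughly sext(%x) * 1 + 4, with IsNSW set because sext distributes
/// over the nsw add.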
401 static LinearExpression GetLinearExpression(
402 const CastedValue &Val, const DataLayout &DL, unsigned Depth,
403 AssumptionCache *AC, DominatorTree *DT) {
404 // Limit our recursion depth.
405 if (Depth == 6)
406 return Val;
408 if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
409 return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
410 Val.evaluateWith(Const->getValue()), true, true);
412 if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
413 if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
414 APInt RHS = Val.evaluateWith(RHSC->getValue());
415 // The only non-OBO case we deal with is or, and only limited to the
416 // case where it is both nuw and nsw.
417 bool NUW = true, NSW = true;
418 if (isa<OverflowingBinaryOperator>(BOp)) {
419 NUW &= BOp->hasNoUnsignedWrap();
420 NSW &= BOp->hasNoSignedWrap();
422 if (!Val.canDistributeOver(NUW, NSW))
423 return Val;
425 // While we can distribute over trunc, we cannot preserve nowrap flags
426 // in that case.
427 if (Val.TruncBits)
428 NUW = NSW = false;
430 LinearExpression E(Val);
431 switch (BOp->getOpcode()) {
432 default:
433 // We don't understand this instruction, so we can't decompose it any
434 // further.
435 return Val;
436 case Instruction::Or:
437 // X|C == X+C if it is disjoint. Otherwise we can't analyze it.
438 if (!cast<PossiblyDisjointInst>(BOp)->isDisjoint())
439 return Val;
441 [[fallthrough]];
442 case Instruction::Add: {
443 E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
444 Depth + 1, AC, DT);
445 E.Offset += RHS;
446 E.IsNUW &= NUW;
447 E.IsNSW &= NSW;
448 break;
450 case Instruction::Sub: {
451 E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
452 Depth + 1, AC, DT);
453 E.Offset -= RHS;
454 E.IsNUW = false; // sub nuw x, y is not add nuw x, -y.
455 E.IsNSW &= NSW;
456 break;
458 case Instruction::Mul:
459 E = GetLinearExpression(Val.withValue(BOp->getOperand(0), false), DL,
460 Depth + 1, AC, DT)
461 .mul(RHS, NUW, NSW);
462 break;
463 case Instruction::Shl:
464 // We're trying to linearize an expression of the kind:
465 // shl i8 -128, 36
466 // where the shift count exceeds the bitwidth of the type.
467 // We can't decompose this further (the expression would return
468 // a poison value).
469 if (RHS.getLimitedValue() > Val.getBitWidth())
470 return Val;
472 E = GetLinearExpression(Val.withValue(BOp->getOperand(0), NSW), DL,
473 Depth + 1, AC, DT);
474 E.Offset <<= RHS.getLimitedValue();
475 E.Scale <<= RHS.getLimitedValue();
476 E.IsNUW &= NUW;
477 E.IsNSW &= NSW;
478 break;
480 return E;
484 if (const auto *ZExt = dyn_cast<ZExtInst>(Val.V))
485 return GetLinearExpression(
486 Val.withZExtOfValue(ZExt->getOperand(0), ZExt->hasNonNeg()), DL,
487 Depth + 1, AC, DT);
489 if (isa<SExtInst>(Val.V))
490 return GetLinearExpression(
491 Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
492 DL, Depth + 1, AC, DT);
494 return Val;
497 namespace {
498 // A linear transformation of a Value; this class represents
499 // ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
500 struct VariableGEPIndex {
501 CastedValue Val;
502 APInt Scale;
504 // Context instruction to use when querying information about this index.
505 const Instruction *CxtI;
507 /// True if all operations in this expression are NSW.
508 bool IsNSW;
510 /// True if the index should be subtracted rather than added. We don't simply
511 /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
512 /// non-wrapping, while X + INT_MIN*(-1) wraps.
513 bool IsNegated;
515 bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
516 if (IsNegated == Other.IsNegated)
517 return Scale == -Other.Scale;
518 return Scale == Other.Scale;
521 void dump() const {
522 print(dbgs());
523 dbgs() << "\n";
525 void print(raw_ostream &OS) const {
526 OS << "(V=" << Val.V->getName()
527 << ", zextbits=" << Val.ZExtBits
528 << ", sextbits=" << Val.SExtBits
529 << ", truncbits=" << Val.TruncBits
530 << ", scale=" << Scale
531 << ", nsw=" << IsNSW
532 << ", negated=" << IsNegated << ")";
537 // Represents the internal structure of a GEP, decomposed into a base pointer,
538 // constant offsets, and variable scaled indices.
539 struct BasicAAResult::DecomposedGEP {
540 // Base pointer of the GEP
541 const Value *Base;
542 // Total constant offset from base.
543 APInt Offset;
544 // Scaled variable (non-constant) indices.
545 SmallVector<VariableGEPIndex, 4> VarIndices;
546 // Nowrap flags common to all GEP operations involved in expression.
547 GEPNoWrapFlags NWFlags = GEPNoWrapFlags::all();
549 void dump() const {
550 print(dbgs());
551 dbgs() << "\n";
553 void print(raw_ostream &OS) const {
554 OS << ", inbounds=" << (NWFlags.isInBounds() ? "1" : "0")
555 << ", nuw=" << (NWFlags.hasNoUnsignedWrap() ? "1" : "0")
556 << "(DecomposedGEP Base=" << Base->getName() << ", Offset=" << Offset
557 << ", VarIndices=[";
558 for (size_t i = 0; i < VarIndices.size(); i++) {
559 if (i != 0)
560 OS << ", ";
561 VarIndices[i].print(OS);
563 OS << "])";
568 /// If V is a symbolic pointer expression, decompose it into a base pointer
569 /// with a constant offset and a number of scaled symbolic offsets.
571 /// The scaled symbolic offsets (represented by pairs of a Value* and a scale
572 /// in the VarIndices vector) are Value*'s that are known to be scaled by the
573 /// specified amount, but which may have other unrepresented high bits. As
574 /// such, the gep cannot necessarily be reconstructed from its decomposed form.
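/// For illustration (hypothetical IR), on a target with 64-bit pointers:
///   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
/// decomposes into Base = %base, Offset = 0 and one variable index with
/// Val = %i and Scale = 4 (the i32 element size), keeping the inbounds flag.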
575 BasicAAResult::DecomposedGEP
576 BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
577 AssumptionCache *AC, DominatorTree *DT) {
578 // Limit recursion depth to limit compile time in crazy cases.
579 unsigned MaxLookup = MaxLookupSearchDepth;
580 SearchTimes++;
581 const Instruction *CxtI = dyn_cast<Instruction>(V);
583 unsigned IndexSize = DL.getIndexTypeSizeInBits(V->getType());
584 DecomposedGEP Decomposed;
585 Decomposed.Offset = APInt(IndexSize, 0);
586 do {
587 // See if this is a bitcast or GEP.
588 const Operator *Op = dyn_cast<Operator>(V);
589 if (!Op) {
590 // The only non-operator case we can handle is a GlobalAlias.
591 if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
592 if (!GA->isInterposable()) {
593 V = GA->getAliasee();
594 continue;
597 Decomposed.Base = V;
598 return Decomposed;
601 if (Op->getOpcode() == Instruction::BitCast ||
602 Op->getOpcode() == Instruction::AddrSpaceCast) {
603 Value *NewV = Op->getOperand(0);
604 // Don't look through casts between address spaces with differing index
605 // widths.
606 if (DL.getIndexTypeSizeInBits(NewV->getType()) != IndexSize) {
607 Decomposed.Base = V;
608 return Decomposed;
610 V = NewV;
611 continue;
614 const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
615 if (!GEPOp) {
616 if (const auto *PHI = dyn_cast<PHINode>(V)) {
617 // Look through single-arg phi nodes created by LCSSA.
618 if (PHI->getNumIncomingValues() == 1) {
619 V = PHI->getIncomingValue(0);
620 continue;
622 } else if (const auto *Call = dyn_cast<CallBase>(V)) {
623 // CaptureTracking knows about special capturing properties of some
624 // intrinsics like launder.invariant.group that can't be expressed with
625 // attributes, but have properties like returning an aliasing pointer.
626 // Because some analyses may assume that a nocapture pointer is not
627 // returned from such a special intrinsic (the function would have to
628 // be marked with the returned attribute), it is crucial to use this
629 // helper, which is kept in sync with CaptureTracking. Not using it may
630 // cause miscompilations where two aliasing pointers are assumed not
631 // to alias.
632 if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
633 V = RP;
634 continue;
638 Decomposed.Base = V;
639 return Decomposed;
642 // Track the common nowrap flags for all GEPs we see.
643 Decomposed.NWFlags &= GEPOp->getNoWrapFlags();
645 assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");
647 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
648 gep_type_iterator GTI = gep_type_begin(GEPOp);
649 for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
650 I != E; ++I, ++GTI) {
651 const Value *Index = *I;
652 // Compute the (potentially symbolic) offset in bytes for this index.
653 if (StructType *STy = GTI.getStructTypeOrNull()) {
654 // For a struct, add the member offset.
655 unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
656 if (FieldNo == 0)
657 continue;
659 Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
660 continue;
663 // For an array/pointer, add the element offset, explicitly scaled.
664 if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
665 if (CIdx->isZero())
666 continue;
668 // Don't attempt to analyze GEPs if the scalable index is not zero.
669 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
670 if (AllocTypeSize.isScalable()) {
671 Decomposed.Base = V;
672 return Decomposed;
675 Decomposed.Offset += AllocTypeSize.getFixedValue() *
676 CIdx->getValue().sextOrTrunc(IndexSize);
677 continue;
680 TypeSize AllocTypeSize = GTI.getSequentialElementStride(DL);
681 if (AllocTypeSize.isScalable()) {
682 Decomposed.Base = V;
683 return Decomposed;
686 // If the integer type is smaller than the index size, it is implicitly
687 // sign extended or truncated to index size.
688 bool NUSW = GEPOp->hasNoUnsignedSignedWrap();
689 bool NUW = GEPOp->hasNoUnsignedWrap();
690 bool NonNeg = NUSW && NUW;
691 unsigned Width = Index->getType()->getIntegerBitWidth();
692 unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
693 unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
694 LinearExpression LE = GetLinearExpression(
695 CastedValue(Index, 0, SExtBits, TruncBits, NonNeg), DL, 0, AC, DT);
697 // Scale by the type size.
698 unsigned TypeSize = AllocTypeSize.getFixedValue();
699 LE = LE.mul(APInt(IndexSize, TypeSize), NUW, NUSW);
700 Decomposed.Offset += LE.Offset;
701 APInt Scale = LE.Scale;
702 if (!LE.IsNUW)
703 Decomposed.NWFlags = Decomposed.NWFlags.withoutNoUnsignedWrap();
705 // If we already had an occurrence of this index variable, merge this
706 // scale into it. For example, we want to handle:
707 // A[x][x] -> x*16 + x*4 -> x*20
708 // This also ensures that 'x' only appears in the index list once.
709 for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
710 if ((Decomposed.VarIndices[i].Val.V == LE.Val.V ||
711 areBothVScale(Decomposed.VarIndices[i].Val.V, LE.Val.V)) &&
712 Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
713 Scale += Decomposed.VarIndices[i].Scale;
714 // We cannot guarantee no-wrap for the merge.
715 LE.IsNSW = LE.IsNUW = false;
716 Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
717 break;
721 if (!!Scale) {
722 VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW,
723 /* IsNegated */ false};
724 Decomposed.VarIndices.push_back(Entry);
728 // Analyze the base pointer next.
729 V = GEPOp->getOperand(0);
730 } while (--MaxLookup);
732 // If the chain of expressions is too deep, just return early.
733 Decomposed.Base = V;
734 SearchLimitReached++;
735 return Decomposed;
738 ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
739 AAQueryInfo &AAQI,
740 bool IgnoreLocals) {
741 assert(Visited.empty() && "Visited must be cleared after use!");
742 auto _ = make_scope_exit([&] { Visited.clear(); });
744 unsigned MaxLookup = 8;
745 SmallVector<const Value *, 16> Worklist;
746 Worklist.push_back(Loc.Ptr);
747 ModRefInfo Result = ModRefInfo::NoModRef;
749 do {
750 const Value *V = getUnderlyingObject(Worklist.pop_back_val());
751 if (!Visited.insert(V).second)
752 continue;
754 // Ignore allocas if we were instructed to do so.
755 if (IgnoreLocals && isa<AllocaInst>(V))
756 continue;
758 // If the location points to memory that is known to be invariant for
759 // the life of the underlying SSA value, then we can exclude Mod from
760 // the set of valid memory effects.
762 // An argument that is marked readonly and noalias is known to be
763 // invariant while that function is executing.
764 if (const Argument *Arg = dyn_cast<Argument>(V)) {
765 if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
766 Result |= ModRefInfo::Ref;
767 continue;
771 // A global constant can't be mutated.
772 if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
773 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
774 // global to be marked constant in some modules and non-constant in
775 // others. GV may even be a declaration, not a definition.
776 if (!GV->isConstant())
777 return ModRefInfo::ModRef;
778 continue;
781 // If both select values point to local memory, then so does the select.
782 if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
783 Worklist.push_back(SI->getTrueValue());
784 Worklist.push_back(SI->getFalseValue());
785 continue;
788 // If all values incoming to a phi node point to local memory, then so does
789 // the phi.
790 if (const PHINode *PN = dyn_cast<PHINode>(V)) {
791 // Don't bother inspecting phi nodes with many operands.
792 if (PN->getNumIncomingValues() > MaxLookup)
793 return ModRefInfo::ModRef;
794 append_range(Worklist, PN->incoming_values());
795 continue;
798 // Otherwise be conservative.
799 return ModRefInfo::ModRef;
800 } while (!Worklist.empty() && --MaxLookup);
802 // If we hit the maximum number of instructions to examine, be conservative.
803 if (!Worklist.empty())
804 return ModRefInfo::ModRef;
806 return Result;
809 static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
810 const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
811 return II && II->getIntrinsicID() == IID;
814 /// Returns the behavior when calling the given call site.
815 MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
816 AAQueryInfo &AAQI) {
817 MemoryEffects Min = Call->getAttributes().getMemoryEffects();
819 if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
820 MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
821 // Operand bundles on the call may also read or write memory, in addition
822 // to the behavior of the called function.
823 if (Call->hasReadingOperandBundles())
824 FuncME |= MemoryEffects::readOnly();
825 if (Call->hasClobberingOperandBundles())
826 FuncME |= MemoryEffects::writeOnly();
827 Min &= FuncME;
830 return Min;
833 /// Returns the behavior when calling the given function. For use when the call
834 /// site is not known.
835 MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
836 switch (F->getIntrinsicID()) {
837 case Intrinsic::experimental_guard:
838 case Intrinsic::experimental_deoptimize:
839 // These intrinsics can read arbitrary memory, and additionally modref
840 // inaccessible memory to model control dependence.
841 return MemoryEffects::readOnly() |
842 MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
845 return F->getMemoryEffects();
848 ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
849 unsigned ArgIdx) {
850 if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
851 return ModRefInfo::Mod;
853 if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
854 return ModRefInfo::Ref;
856 if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
857 return ModRefInfo::NoModRef;
859 return ModRefInfo::ModRef;
862 #ifndef NDEBUG
863 static const Function *getParent(const Value *V) {
864 if (const Instruction *inst = dyn_cast<Instruction>(V)) {
865 if (!inst->getParent())
866 return nullptr;
867 return inst->getParent()->getParent();
870 if (const Argument *arg = dyn_cast<Argument>(V))
871 return arg->getParent();
873 return nullptr;
876 static bool notDifferentParent(const Value *O1, const Value *O2) {
878 const Function *F1 = getParent(O1);
879 const Function *F2 = getParent(O2);
881 return !F1 || !F2 || F1 == F2;
883 #endif
885 AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
886 const MemoryLocation &LocB, AAQueryInfo &AAQI,
887 const Instruction *CtxI) {
888 assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
889 "BasicAliasAnalysis doesn't support interprocedural queries.");
890 return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
893 /// Checks to see if the specified callsite can clobber the specified memory
894 /// object.
896 /// Since we only look at local properties of this function, we really can't
897 /// say much about this query. We do, however, use simple "address taken"
898 /// analysis on local objects.
899 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
900 const MemoryLocation &Loc,
901 AAQueryInfo &AAQI) {
902 assert(notDifferentParent(Call, Loc.Ptr) &&
903 "AliasAnalysis query involving multiple functions!");
905 const Value *Object = getUnderlyingObject(Loc.Ptr);
907 // Calls marked 'tail' cannot read or write allocas from the current frame
908 // because the current frame might be destroyed by the time they run. However,
909 // a tail call may use an alloca with byval. Calling with byval copies the
910 // contents of the alloca into argument registers or stack slots, so there is
911 // no lifetime issue.
912 if (isa<AllocaInst>(Object))
913 if (const CallInst *CI = dyn_cast<CallInst>(Call))
914 if (CI->isTailCall() &&
915 !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
916 return ModRefInfo::NoModRef;
918 // Stack restore is able to modify unescaped dynamic allocas. Assume it may
919 // modify them even though the alloca is not escaped.
920 if (auto *AI = dyn_cast<AllocaInst>(Object))
921 if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
922 return ModRefInfo::Mod;
924 // A call can access a locally allocated object either because it is passed as
925 // an argument to the call, or because it has escaped prior to the call.
927 // Make sure the object has not escaped here, and then check that none of the
928 // call arguments alias the object below.
930 // We model calls that can return twice (setjmp) as clobbering non-escaping
931 // objects, to model any accesses that may occur prior to the second return.
932 // As an exception, ignore allocas, as setjmp is not required to preserve
933 // non-volatile stores for them.
934 if (!isa<Constant>(Object) && Call != Object &&
935 AAQI.CA->isNotCapturedBefore(Object, Call, /*OrAt*/ false) &&
936 (isa<AllocaInst>(Object) || !Call->hasFnAttr(Attribute::ReturnsTwice))) {
938 // Optimistically assume that the call doesn't touch Object and check this
939 // assumption in the following loop.
940 ModRefInfo Result = ModRefInfo::NoModRef;
942 unsigned OperandNo = 0;
943 for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
944 CI != CE; ++CI, ++OperandNo) {
945 if (!(*CI)->getType()->isPointerTy())
946 continue;
948 // Call doesn't access memory through this operand, so we don't care
949 // if it aliases with Object.
950 if (Call->doesNotAccessMemory(OperandNo))
951 continue;
953 // If this is a no-capture pointer argument, see if we can tell that it
954 // is impossible to alias the pointer we're checking.
955 AliasResult AR =
956 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
957 MemoryLocation::getBeforeOrAfter(Object), AAQI);
958 // Operand doesn't alias 'Object', continue looking for other aliases
959 if (AR == AliasResult::NoAlias)
960 continue;
961 // Operand aliases 'Object', but the call doesn't modify it. Strengthen the
962 // initial assumption and keep looking in case there are more aliases.
963 if (Call->onlyReadsMemory(OperandNo)) {
964 Result |= ModRefInfo::Ref;
965 continue;
967 // Operand aliases 'Object' but call only writes into it.
968 if (Call->onlyWritesMemory(OperandNo)) {
969 Result |= ModRefInfo::Mod;
970 continue;
972 // This operand aliases 'Object' and call reads and writes into it.
973 // Setting ModRef will not yield an early return below, MustAlias is not
974 // used further.
975 Result = ModRefInfo::ModRef;
976 break;
979 // Early return if we improved mod ref information
980 if (!isModAndRefSet(Result))
981 return Result;
984 // If the call is malloc/calloc like, we can assume that it doesn't
985 // modify any IR visible value. This is only valid because we assume these
986 // routines do not read values visible in the IR. TODO: Consider special
987 // casing realloc and strdup routines which access only their arguments as
988 // well. Or alternatively, replace all of this with inaccessiblememonly once
989 // that's implemented fully.
990 if (isMallocOrCallocLikeFn(Call, &TLI)) {
991 // Be conservative if the accessed pointer may alias the allocation -
992 // fallback to the generic handling below.
993 if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
994 AliasResult::NoAlias)
995 return ModRefInfo::NoModRef;
998 // Like assumes, invariant.start intrinsics were also marked as arbitrarily
999 // writing so that proper control dependencies are maintained but they never
1000 // mod any particular memory location visible to the IR.
1001 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
1002 // intrinsic is now modeled as reading memory. This prevents hoisting the
1003 // invariant.start intrinsic over stores. Consider:
1004 // *ptr = 40;
1005 // *ptr = 50;
1006 // invariant_start(ptr)
1007 // int val = *ptr;
1008 // print(val);
1010 // This cannot be transformed to:
1012 // *ptr = 40;
1013 // invariant_start(ptr)
1014 // *ptr = 50;
1015 // int val = *ptr;
1016 // print(val);
1018 // The transformation will cause the second store to be ignored (based on
1019 // rules of invariant.start) and print 40, while the first program always
1020 // prints 50.
1021 if (isIntrinsicCall(Call, Intrinsic::invariant_start))
1022 return ModRefInfo::Ref;
1024 // Be conservative.
1025 return ModRefInfo::ModRef;
1028 ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
1029 const CallBase *Call2,
1030 AAQueryInfo &AAQI) {
1031 // Guard intrinsics are marked as arbitrarily writing so that proper control
1032 // dependencies are maintained but they never mod any particular memory
1033 // location.
1035 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
1036 // heap state at the point the guard is issued needs to be consistent in case
1037 // the guard invokes the "deopt" continuation.
1039 // NB! This function is *not* commutative, so we special case two
1040 // possibilities for guard intrinsics.
1042 if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
1043 return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
1044 ? ModRefInfo::Ref
1045 : ModRefInfo::NoModRef;
1047 if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
1048 return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
1049 ? ModRefInfo::Mod
1050 : ModRefInfo::NoModRef;
1052 // Be conservative.
1053 return ModRefInfo::ModRef;
1056 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1057 /// another pointer.
1059 /// We know that V1 is a GEP, but we don't know anything about V2.
1060 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1061 /// V2.
1062 AliasResult BasicAAResult::aliasGEP(
1063 const GEPOperator *GEP1, LocationSize V1Size,
1064 const Value *V2, LocationSize V2Size,
1065 const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
1066 auto BaseObjectsAlias = [&]() {
1067 AliasResult BaseAlias =
1068 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
1069 MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
1070 return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
1071 : AliasResult::MayAlias;
1074 if (!V1Size.hasValue() && !V2Size.hasValue()) {
1075 // TODO: This limitation exists for compile-time reasons. Relax it if we
1076 // can avoid exponential pathological cases.
1077 if (!isa<GEPOperator>(V2))
1078 return AliasResult::MayAlias;
1080 // If both accesses have unknown size, we can only check whether the base
1081 // objects don't alias.
1082 return BaseObjectsAlias();
1085 DominatorTree *DT = getDT(AAQI);
1086 DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
1087 DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);
1089 // Bail if we were not able to decompose anything.
1090 if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
1091 return AliasResult::MayAlias;
1093 // Fall back to base objects if pointers have different index widths.
1094 if (DecompGEP1.Offset.getBitWidth() != DecompGEP2.Offset.getBitWidth())
1095 return BaseObjectsAlias();
1097 // Swap GEP1 and GEP2 if GEP2 has more variable indices.
1098 if (DecompGEP1.VarIndices.size() < DecompGEP2.VarIndices.size()) {
1099 std::swap(DecompGEP1, DecompGEP2);
1100 std::swap(V1Size, V2Size);
1101 std::swap(UnderlyingV1, UnderlyingV2);
1104 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1105 // symbolic difference.
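// For illustration (hypothetical decompositions): if GEP1 decomposes to
// %base + 4*%i + 8 and GEP2 decomposes to %base + 4*%i, the common 4*%i term
// cancels, leaving DecompGEP1 with a constant offset of 8 and no variable
// indices.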
1106 subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);
1108 // If an inbounds GEP would have to start from an out of bounds address
1109 // for the two to alias, then we can assume noalias.
1110 // TODO: Remove !isScalable() once BasicAA fully supports scalable location
1111 // sizes.
1113 if (DecompGEP1.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1114 V2Size.hasValue() && !V2Size.isScalable() &&
1115 DecompGEP1.Offset.sge(V2Size.getValue()) &&
1116 isBaseOfObject(DecompGEP2.Base))
1117 return AliasResult::NoAlias;
1119 // Symmetric case to above.
1120 if (DecompGEP2.NWFlags.isInBounds() && DecompGEP1.VarIndices.empty() &&
1121 V1Size.hasValue() && !V1Size.isScalable() &&
1122 DecompGEP1.Offset.sle(-V1Size.getValue()) &&
1123 isBaseOfObject(DecompGEP1.Base))
1124 return AliasResult::NoAlias;
1126 // For GEPs with identical offsets, we can preserve the size and AAInfo
1127 // when performing the alias check on the underlying objects.
1128 if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
1129 return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
1130 MemoryLocation(DecompGEP2.Base, V2Size), AAQI);
1132 // Do the base pointers alias?
1133 AliasResult BaseAlias =
1134 AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
1135 MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);
1137 // If we get a No or May, then return it immediately, no amount of analysis
1138 // will improve this situation.
1139 if (BaseAlias != AliasResult::MustAlias) {
1140 assert(BaseAlias == AliasResult::NoAlias ||
1141 BaseAlias == AliasResult::MayAlias);
1142 return BaseAlias;
1145 // If there is a constant difference between the pointers, but the difference
1146 // is less than the size of the associated memory object, then we know
1147 // that the objects are partially overlapping. If the difference is
1148 // greater, we know they do not overlap.
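// For example (hypothetical sizes): a constant difference of 8 between two
// 4-byte accesses gives NoAlias, while a difference of 2 between two 4-byte
// accesses gives PartialAlias.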
1149 if (DecompGEP1.VarIndices.empty()) {
1150 APInt &Off = DecompGEP1.Offset;
1152 // Initialize for Off >= 0 (V2 <= GEP1) case.
1153 LocationSize VLeftSize = V2Size;
1154 LocationSize VRightSize = V1Size;
1155 const bool Swapped = Off.isNegative();
1157 if (Swapped) {
1158 // Swap if we have the situation where:
1159 // + +
1160 // | BaseOffset |
1161 // ---------------->|
1162 // |-->V1Size |-------> V2Size
1163 // GEP1 V2
1164 std::swap(VLeftSize, VRightSize);
1165 Off = -Off;
1168 if (!VLeftSize.hasValue())
1169 return AliasResult::MayAlias;
1171 const TypeSize LSize = VLeftSize.getValue();
1172 if (!LSize.isScalable()) {
1173 if (Off.ult(LSize)) {
1174 // Conservatively drop processing if a phi was visited and/or offset is
1175 // too big.
1176 AliasResult AR = AliasResult::PartialAlias;
1177 if (VRightSize.hasValue() && !VRightSize.isScalable() &&
1178 Off.ule(INT32_MAX) && (Off + VRightSize.getValue()).ule(LSize)) {
1179 // Memory referenced by right pointer is nested. Save the offset in
1180 // cache. Note that originally offset estimated as GEP1-V2, but
1181 // AliasResult contains the shift that represents GEP1+Offset=V2.
1182 AR.setOffset(-Off.getSExtValue());
1183 AR.swap(Swapped);
1185 return AR;
1187 return AliasResult::NoAlias;
1188 } else {
1189 // We can use the getVScaleRange to prove that Off >= (CR.upper * LSize).
1190 ConstantRange CR = getVScaleRange(&F, Off.getBitWidth());
1191 bool Overflow;
1192 APInt UpperRange = CR.getUnsignedMax().umul_ov(
1193 APInt(Off.getBitWidth(), LSize.getKnownMinValue()), Overflow);
1194 if (!Overflow && Off.uge(UpperRange))
1195 return AliasResult::NoAlias;
1199 // VScale Alias Analysis - Given one scalable offset between accesses and a
1200 // scalable typesize, we can divide each side by vscale, treating both values
1201 // as a constant. We prove that Offset/vscale >= TypeSize/vscale.
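// For example (a hypothetical sketch): an access at %p and one at
// %p + 16*vscale, each of scalable size 16*vscale bytes, cannot overlap,
// provided the scaled offset is known not to overflow.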
1202 if (DecompGEP1.VarIndices.size() == 1 &&
1203 DecompGEP1.VarIndices[0].Val.TruncBits == 0 &&
1204 DecompGEP1.Offset.isZero() &&
1205 PatternMatch::match(DecompGEP1.VarIndices[0].Val.V,
1206 PatternMatch::m_VScale())) {
1207 const VariableGEPIndex &ScalableVar = DecompGEP1.VarIndices[0];
1208 APInt Scale =
1209 ScalableVar.IsNegated ? -ScalableVar.Scale : ScalableVar.Scale;
1210 LocationSize VLeftSize = Scale.isNegative() ? V1Size : V2Size;
1212 // Check if the offset is known to not overflow, if it does then attempt to
1213 // prove it with the known values of vscale_range.
1214 bool Overflows = !DecompGEP1.VarIndices[0].IsNSW;
1215 if (Overflows) {
1216 ConstantRange CR = getVScaleRange(&F, Scale.getBitWidth());
1217 (void)CR.getSignedMax().smul_ov(Scale, Overflows);
1220 if (!Overflows) {
1221 // Note that we do not check that the typesize is scalable, as vscale >= 1
1222 // so noalias still holds so long as the dependency distance is at least
1223 // as big as the typesize.
1224 if (VLeftSize.hasValue() &&
1225 Scale.abs().uge(VLeftSize.getValue().getKnownMinValue()))
1226 return AliasResult::NoAlias;
1230 // If the difference between pointers is Offset +<nuw> Indices then we know
1231 // that the addition does not wrap the pointer index type (add nuw) and the
1232 // constant Offset is a lower bound on the distance between the pointers. We
1233 // can then prove NoAlias via Offset u>= VLeftSize.
1234 // + + +
1235 // | BaseOffset | +<nuw> Indices |
1236 // ---------------->|-------------------->|
1237 // |-->V2Size | |-------> V1Size
1238 // LHS RHS
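// For example (a hypothetical sketch): if GEP1 is formed from %p purely with
// nuw GEPs as %p + 8 + 4*%i and the other pointer is %p itself with a 4-byte
// access, the constant 8 is a lower bound on the distance between the two
// pointers, so the accesses cannot overlap.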
1239 if (!DecompGEP1.VarIndices.empty() &&
1240 DecompGEP1.NWFlags.hasNoUnsignedWrap() && V2Size.hasValue() &&
1241 !V2Size.isScalable() && DecompGEP1.Offset.uge(V2Size.getValue()))
1242 return AliasResult::NoAlias;
1244 // Bail on analysing scalable LocationSize
1245 if (V1Size.isScalable() || V2Size.isScalable())
1246 return AliasResult::MayAlias;
1248 // We need to know both access sizes for all the following heuristics.
1249 if (!V1Size.hasValue() || !V2Size.hasValue())
1250 return AliasResult::MayAlias;
1252 APInt GCD;
1253 ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
1254 for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
1255 const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
1256 const APInt &Scale = Index.Scale;
1257 APInt ScaleForGCD = Scale;
1258 if (!Index.IsNSW)
1259 ScaleForGCD =
1260 APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());
1262 if (i == 0)
1263 GCD = ScaleForGCD.abs();
1264 else
1265 GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());
1267 ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
1268 true, &AC, Index.CxtI);
1269 KnownBits Known =
1270 computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
1271 CR = CR.intersectWith(
1272 ConstantRange::fromKnownBits(Known, /* Signed */ true),
1273 ConstantRange::Signed);
1274 CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());
1276 assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
1277 "Bit widths are normalized to MaxIndexSize");
1278 if (Index.IsNSW)
1279 CR = CR.smul_sat(ConstantRange(Scale));
1280 else
1281 CR = CR.smul_fast(ConstantRange(Scale));
1283 if (Index.IsNegated)
1284 OffsetRange = OffsetRange.sub(CR);
1285 else
1286 OffsetRange = OffsetRange.add(CR);
1289 // We now have accesses at two offsets from the same base:
1290 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1291 // 2. 0 with size V2Size
1292 // Using arithmetic modulo GCD, the accesses are at
1293 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1294 // into the range [V2Size..GCD), then we know they cannot overlap.
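// Worked example (hypothetical values): with GCD = 8, Offset = 4 and two
// 4-byte accesses, ModOffset = 4, so the first access covers [4, 8) modulo 8
// and the second covers [0, 4); they cannot overlap.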
1295 APInt ModOffset = DecompGEP1.Offset.srem(GCD);
1296 if (ModOffset.isNegative())
1297 ModOffset += GCD; // We want mod, not rem.
1298 if (ModOffset.uge(V2Size.getValue()) &&
1299 (GCD - ModOffset).uge(V1Size.getValue()))
1300 return AliasResult::NoAlias;
1302 // Compute ranges of potentially accessed bytes for both accesses. If the
1303 // intersection is empty, there can be no overlap.
1304 unsigned BW = OffsetRange.getBitWidth();
1305 ConstantRange Range1 = OffsetRange.add(
1306 ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
1307 ConstantRange Range2 =
1308 ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
1309 if (Range1.intersectWith(Range2).isEmptySet())
1310 return AliasResult::NoAlias;
1312 // Try to determine the range of values for VarIndex such that
1313 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1314 std::optional<APInt> MinAbsVarIndex;
1315 if (DecompGEP1.VarIndices.size() == 1) {
1316 // VarIndex = Scale*V.
1317 const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
1318 if (Var.Val.TruncBits == 0 &&
1319 isKnownNonZero(Var.Val.V, SimplifyQuery(DL, DT, &AC, Var.CxtI))) {
1320 // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1321 // potentially wrapping math.
1322 auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
1323 if (Var.IsNSW)
1324 return true;
1326 int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
1327 // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1328 // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1329 // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1330 int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
1331 if (MaxScaleValueBW <= 0)
1332 return false;
1333 return Var.Scale.ule(
1334 APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
1336 // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1337 // presence of potentially wrapping math.
1338 if (MultiplyByScaleNoWrap(Var)) {
1339 // If V != 0 then abs(VarIndex) >= abs(Scale).
1340 MinAbsVarIndex = Var.Scale.abs();
1343 } else if (DecompGEP1.VarIndices.size() == 2) {
1344 // VarIndex = Scale*V0 + (-Scale)*V1.
1345 // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1346 // Check that MayBeCrossIteration is false, to avoid reasoning about
1347 // inequality of values across loop iterations.
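// For illustration (hypothetical GEPs): for &A[i] and &A[j] with 4-byte
// elements, the difference decomposes to 4*i - 4*j; if i != j is known,
// abs(VarIndex) >= 4, so two 4-byte accesses cannot overlap.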
1348 const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
1349 const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
1350 if (Var0.hasNegatedScaleOf(Var1) && Var0.Val.TruncBits == 0 &&
1351 Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
1352 isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
1353 DT))
1354 MinAbsVarIndex = Var0.Scale.abs();
1357 if (MinAbsVarIndex) {
1358 // The constant offset will have added at least +/-MinAbsVarIndex to it.
1359 APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
1360 APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
1361 // We know that Offset <= OffsetLo || Offset >= OffsetHi
1362 if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
1363 OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
1364 return AliasResult::NoAlias;
1367 if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
1368 return AliasResult::NoAlias;
1370 // Statically, we can see that the base objects are the same, but the
1371 // pointers have dynamic offsets which we can't resolve. And none of our
1372 // little tricks above worked.
1373 return AliasResult::MayAlias;
1376 static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
1377 // If the results agree, take it.
1378 if (A == B)
1379 return A;
1380 // A mix of PartialAlias and MustAlias is PartialAlias.
1381 if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
1382 (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
1383 return AliasResult::PartialAlias;
1384 // Otherwise, we don't know anything.
1385 return AliasResult::MayAlias;
1388 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1389 /// against another.
1390 AliasResult
1391 BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
1392 const Value *V2, LocationSize V2Size,
1393 AAQueryInfo &AAQI) {
1394 // If the values are Selects with the same condition, we can do a more precise
1395 // check: just check for aliases between the values on corresponding arms.
1396 if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
1397 if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
1398 AAQI)) {
1399 AliasResult Alias =
1400 AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1401 MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
1402 if (Alias == AliasResult::MayAlias)
1403 return AliasResult::MayAlias;
1404 AliasResult ThisAlias =
1405 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1406 MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
1407 return MergeAliasResults(ThisAlias, Alias);
1410 // If both arms of the Select are NoAlias or MustAlias with V2, return
1411 // NoAlias / MustAlias. Otherwise, return MayAlias.
1412 AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
1413 MemoryLocation(V2, V2Size), AAQI);
1414 if (Alias == AliasResult::MayAlias)
1415 return AliasResult::MayAlias;
1417 AliasResult ThisAlias =
1418 AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
1419 MemoryLocation(V2, V2Size), AAQI);
1420 return MergeAliasResults(ThisAlias, Alias);
1423 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1424 /// another.
1425 AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
1426 const Value *V2, LocationSize V2Size,
1427 AAQueryInfo &AAQI) {
1428 if (!PN->getNumIncomingValues())
1429 return AliasResult::NoAlias;
1430 // If the values are PHIs in the same block, we can do a more precise
1431 // as well as efficient check: just check for aliases between the values
1432 // on corresponding edges. Don't do this if we are analyzing across
1433 // iterations, as we may pick a different phi entry in different iterations.
1434 if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
1435 if (PN2->getParent() == PN->getParent() && !AAQI.MayBeCrossIteration) {
1436 std::optional<AliasResult> Alias;
1437 for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
1438 AliasResult ThisAlias = AAQI.AAR.alias(
1439 MemoryLocation(PN->getIncomingValue(i), PNSize),
1440 MemoryLocation(
1441 PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
1442 AAQI);
1443 if (Alias)
1444 *Alias = MergeAliasResults(*Alias, ThisAlias);
1445 else
1446 Alias = ThisAlias;
1447 if (*Alias == AliasResult::MayAlias)
1448 break;
1450 return *Alias;
1453 SmallVector<Value *, 4> V1Srcs;
1454 // If a phi operand recurses back to the phi, we can still determine NoAlias
1455 // if we don't alias the underlying objects of the other phi operands, as we
1456 // know that the recursive phi needs to be based on them in some way.
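// For illustration (hypothetical IR), a simple pointer induction variable:
//   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
//   %p.next = getelementptr i8, ptr %p, i64 4
// The %p.next input recurses back to %p, so %base is the only non-recursive
// source; PNSize is widened to beforeOrAfterPointer() below because the
// offset from %base varies per iteration.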
1457 bool isRecursive = false;
1458 auto CheckForRecPhi = [&](Value *PV) {
1459 if (!EnableRecPhiAnalysis)
1460 return false;
1461 if (getUnderlyingObject(PV) == PN) {
1462 isRecursive = true;
1463 return true;
1465 return false;
1468 SmallPtrSet<Value *, 4> UniqueSrc;
1469 Value *OnePhi = nullptr;
1470 for (Value *PV1 : PN->incoming_values()) {
1471 // Skip the phi itself being the incoming value.
1472 if (PV1 == PN)
1473 continue;
1475 if (isa<PHINode>(PV1)) {
1476 if (OnePhi && OnePhi != PV1) {
1477 // To control potential compile time explosion, we choose to be
1478 // conservative when we have more than one Phi input. It is important
1479 // that we handle the single phi case as that lets us handle LCSSA
1480 // phi nodes and (combined with the recursive phi handling) simple
1481 // pointer induction variable patterns.
1482 return AliasResult::MayAlias;
1484 OnePhi = PV1;
1487 if (CheckForRecPhi(PV1))
1488 continue;
1490 if (UniqueSrc.insert(PV1).second)
1491 V1Srcs.push_back(PV1);
1494 if (OnePhi && UniqueSrc.size() > 1)
1495 // Out of an abundance of caution, allow only the trivial lcssa and
1496 // recursive phi cases.
1497 return AliasResult::MayAlias;
1499 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1500 // value. This should only be possible in blocks unreachable from the entry
1501 // block, but return MayAlias just in case.
1502 if (V1Srcs.empty())
1503 return AliasResult::MayAlias;
1505 // If this PHI node is recursive, indicate that the pointer may be moved
1506 // across iterations. We can only prove NoAlias if different underlying
1507 // objects are involved.
1508 if (isRecursive)
1509 PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so we have to conservatively return
  // MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then return
  // NoAlias / MustAlias. Otherwise, return MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers from
  // different iterations. We must therefore make sure that this is not the
  // case. The function isValueEqualInPotentialCycles ensures that this cannot
  // happen by looking at the visited phi nodes and making sure they cannot
  // reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is a
    // non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
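    //
    // For example (illustrative): if O1 is the return value of a call in this
    // function and O2 is an alloca whose address is not captured before that
    // call, the callee cannot have obtained a pointer into O2, so it cannot
    // return one.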
    if (isEscapeSource(O1) && AAQI.CA->isNotCapturedBefore(
                                  O2, dyn_cast<Instruction>(O1), /*OrAt*/ true))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) && AAQI.CA->isNotCapturedBefore(
                                  O1, dyn_cast<Instruction>(O2), /*OrAt*/ true))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
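  // For example, a 16-byte access cannot alias an 8-byte alloca: such an
  // access through the alloca would be undefined behavior, so the two
  // accesses must refer to different objects.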
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  if (EnableSeparateStorageAnalysis) {
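    // Look for llvm.assume calls carrying a "separate_storage" operand bundle
    // that names O1. Such an assume asserts that its two pointer operands
    // point into disjoint allocations, e.g. (illustrative IR):
    //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %p, ptr %q)]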
    for (AssumptionCache::ResultElem &Elem : AC.assumptionsFor(O1)) {
      if (!Elem || Elem.Index == AssumptionCache::ExprResultIdx)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(Elem);
      OperandBundleUse OBU = Assume->getOperandBundleAt(Elem.Index);
      if (OBU.getTagName() == "separate_storage") {
        assert(OBU.Inputs.size() == 2);
        const Value *Hint1 = OBU.Inputs[0].get();
        const Value *Hint2 = OBU.Inputs[1].get();
        // This is often a no-op; instcombine rewrites this for us. No-op
        // getUnderlyingObject calls are fast, though.
        const Value *HintO1 = getUnderlyingObject(Hint1);
        const Value *HintO2 = getUnderlyingObject(Hint2);

        DominatorTree *DT = getDT(AAQI);
        auto ValidAssumeForPtrContext = [&](const Value *Ptr) {
          if (const Instruction *PtrI = dyn_cast<Instruction>(Ptr)) {
            return isValidAssumeForContext(Assume, PtrI, DT,
                                           /* AllowEphemerals */ true);
          }
          if (const Argument *PtrA = dyn_cast<Argument>(Ptr)) {
            const Instruction *FirstI =
                &*PtrA->getParent()->getEntryBlock().begin();
            return isValidAssumeForContext(Assume, FirstI, DT,
                                           /* AllowEphemerals */ true);
          }
          return false;
        };

        if ((O1 == HintO1 && O2 == HintO2) || (O1 == HintO2 && O2 == HintO1)) {
          // Note that we go back to V1 and V2 for the
          // ValidAssumeForPtrContext checks; they're dominated by O1 and O2,
          // so strictly more assumptions are valid for them.
          if ((CtxI && isValidAssumeForContext(Assume, CtxI, DT,
                                               /* AllowEphemerals */ true)) ||
              ValidAssumeForPtrContext(V1) || ValidAssumeForPtrContext(V2)) {
            return AliasResult::NoAlias;
          }
        }
      }
    }
  }

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption. This may either be a direct use
      // of an assumption, or a use of an entry that may itself be based on an
      // assumption.
      ++AAQI.NumAssumptionUses;
      if (Entry.isAssumption())
        ++Entry.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias) {
    AAQI.AssumptionBasedResults.push_back(Locs);
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::AssumptionBased;
  } else {
    Entry.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
  }

  // Depth is incremented before this function is called, so Depth==1 indicates
  // a root query.
  if (AAQI.Depth == 1) {
    // Any remaining assumption based results must be based on proven
    // assumptions, so convert them to definitive results.
    for (const auto &Loc : AAQI.AssumptionBasedResults) {
      auto It = AAQI.AliasCache.find(Loc);
      if (It != AAQI.AliasCache.end())
        It->second.NumAssumptionUses = AAQueryInfo::CacheEntry::Definitive;
    }
    AAQI.AssumptionBasedResults.clear();
    AAQI.NumAssumptionUses = 0;
  }
  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because
/// we are looking through phi nodes, that is we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
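///
/// For example (illustrative), a phi-recurrent pointer such as
///   %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
/// denotes a different address on each iteration, so under
/// MayBeCrossIteration two occurrences of %p need not refer to the same
/// location.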
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a loop.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  return isNotInCycle(Inst, getDT(AAQI), /*LI*/ nullptr);
}

/// Computes the symbolic difference between two de-composed GEPs.
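///
/// For example (illustrative), subtracting a SrcGEP of {Offset = 4, +2*%i}
/// from a DestGEP of {Offset = 8, +2*%i, +3*%j} leaves DestGEP as
/// {Offset = 4, +3*%j}; variable indices of SrcGEP that have no counterpart
/// in DestGEP are appended to DestGEP with IsNegated set.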
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  // Drop nuw flag from GEP if subtraction of constant offsets overflows in an
  // unsigned sense.
  if (DestGEP.Offset.ult(SrcGEP.Offset))
    DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indices.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if ((!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) &&
           !areBothVScale(Dest.Val.V, Src.Val.V)) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        // Drop nuw flag from GEP if subtraction of V's Scale overflows in an
        // unsigned sense.
        if (Dest.Scale.ult(Src.Scale))
          DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();

        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Entry);

      // Drop nuw flag when we have unconsumed variable indices from SrcGEP.
      DestGEP.NWFlags = DestGEP.NWFlags.withoutNoUnsignedWrap();
    }
  }
}

bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // Even if Var0 and Var1 have been sext'ed and then zext'ed, the maximum
  // difference between them could be computed, but we are only interested in
  // the absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

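  // For instance (illustrative): if MinDiffBytes == 12 and GEP.Offset == 0,
  // two accesses of at most 12 bytes each can never overlap, regardless of
  // the value the shared index takes.
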
  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  return BasicAAResult(F.getDataLayout(), F, TLI, AC, DT);
}
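
// Under the new pass manager, other function passes can obtain this result
// directly with (illustrative usage):
//   BasicAAResult &BAA = AM.getResult<BasicAA>(F);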

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getDataLayout(), F, TLIWP.getTLI(F),
                                 ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}