//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"

#define DEBUG_TYPE "basicaa"

using namespace llvm;

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(false));

/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;

bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}

//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//

/// Returns the size of the object specified by V or UnknownSize if unknown.
static uint64_t getObjectSize(const Value *V, const DataLayout &DL,
                              const TargetLibraryInfo &TLI,
                              bool NullIsValidLoc,
                              bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return Size;
  return MemoryLocation::UnknownSize;
}

/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, uint64_t Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //    c1: llvm::getObjectSize()
  //    c2: llvm.objectsize() intrinsic
  //    c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //    char *p = (char*)malloc(100)
  //    char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points to the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //    - either rewind the pointer q to the base-address of the object in
  //      question (in this case rewind to p), or
  //    - just give up. It is up to the caller to make sure the pointer is
  //      pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                      /*RoundToAlign*/ true);

  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize < Size;
}
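
// Illustrative sketch of the RoundToAlign behavior above: an object like
//   %a = alloca [5 x i8], align 8
// reports an aligned object size of 8, so a 6-byte access starting at %a is
// not considered "larger than the object" here even though the declared size
// is 5; sufficiently aligned over-reads past the end are tolerated.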

/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static uint64_t getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size is
  // accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue());
  return DerefBytes;
}
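
// For example, for a parameter declared as "ptr dereferenceable(16) %p", a
// precise 4-byte query yields max(16, 4) == 16. If the attribute were instead
// dereferenceable_or_null(16) and null is a valid location, the
// dereferenceability lower bound is dropped and the precise query yields 4.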

/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, uint64_t Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  uint64_t ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize != MemoryLocation::UnknownSize && ObjectSize == Size;
}

//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;

bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, &LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}

//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//

namespace {
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};
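
// Worked example: for an i32 %v with TruncBits=8, SExtBits=16 and ZExtBits=8,
// getBitWidth() is 32 - 8 + 16 + 8 = 48, and evaluateWith() computes
// zext i48 (sext i40 (trunc i24 N)). E.g. N = 0x00FFFFFF truncates to the
// i24 value -1, sign-extends to the i40 value -1, and zero-extends to
// 0x00FFFFFFFFFF.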

/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
} // end anonymous namespace
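
// A concrete instance of the caveat in mul() above (illustrative): in i8,
// with X = -100 and an offset of 100, (X + 100) *nsw 2 evaluates to 0 without
// wrapping, yet the distributed term X * 2 = -200 wraps. Hence mul() only
// keeps IsNSW when the multiplier is 1 or the offset is zero.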

/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
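
// Illustrative decomposition: given
//   %a = add nsw i64 %x, 4
//   %s = shl nsw i64 %a, 1
// GetLinearExpression on %s yields Val = %x, Scale = 2, Offset = 8 and
// IsNSW = true: the add contributes offset 4, and the shl doubles both the
// scale and the offset.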

/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue, for example, for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}
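
// Worked example: with a 64-bit offset of 0x00000000FFFFFFFF and a 32-bit
// index size, ShiftBits is 32 and (Offset << 32).ashr(32) yields -1; the
// offset is reinterpreted as a signed 32-bit quantity, matching the two's
// complement wrap-around of 32-bit pointer arithmetic.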

namespace {
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale << ")";
  }
};
} // end anonymous namespace

// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};

/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
    // See if this is a bitcast or GEP.
    const Operator *Op = dyn_cast<Operator>(V);
    if (!Op) {
      // The only non-operator case we can handle are GlobalAliases.
      if (const GlobalAlias *GA = dyn_cast<GlobalAlias>(V)) {
        if (!GA->isInterposable()) {
          V = GA->getAliasee();
          continue;
        }
      }
      Decomposed.Base = V;
      return Decomposed;
    }

    if (Op->getOpcode() == Instruction::BitCast ||
        Op->getOpcode() == Instruction::AddrSpaceCast) {
      V = Op->getOperand(0);
      continue;
    }

    const GEPOperator *GEPOp = dyn_cast<GEPOperator>(Op);
    if (!GEPOp) {
      if (const auto *PHI = dyn_cast<PHINode>(V)) {
        // Look through single-arg phi nodes created by LCSSA.
        if (PHI->getNumIncomingValues() == 1) {
          V = PHI->getIncomingValue(0);
          continue;
        }
      } else if (const auto *Call = dyn_cast<CallBase>(V)) {
        // CaptureTracking can know about special capturing properties of some
        // intrinsics like launder.invariant.group, that can't be expressed
        // with the attributes, but have properties like returning an aliasing
        // pointer. Because some analysis may assume that a nocaptured pointer
        // is not returned from some special intrinsic (because the function
        // would have to be marked with the returns attribute), it is crucial
        // to use this function because it should stay in sync with
        // CaptureTracking. Not using it may cause weird miscompilations where
        // two aliasing pointers are assumed to not alias.
        if (auto *RP = getArgumentAliasingToReturnedPointer(Call, false)) {
          V = RP;
          continue;
        }
      }

      Decomposed.Base = V;
      return Decomposed;
    }
    // Track whether we've seen at least one in bounds gep, and if so, whether
    // all geps parsed were in bounds.
    if (Decomposed.InBounds == std::nullopt)
      Decomposed.InBounds = GEPOp->isInBounds();
    else if (!GEPOp->isInBounds())
      Decomposed.InBounds = false;

    assert(GEPOp->getSourceElementType()->isSized() && "GEP must be sized");

    unsigned AS = GEPOp->getPointerAddressSpace();
    // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
    gep_type_iterator GTI = gep_type_begin(GEPOp);
    unsigned IndexSize = DL.getIndexSizeInBits(AS);
    // Assume all GEP operands are constants until proven otherwise.
    bool GepHasConstantOffset = true;
    for (User::const_op_iterator I = GEPOp->op_begin() + 1, E = GEPOp->op_end();
         I != E; ++I, ++GTI) {
      const Value *Index = *I;
      // Compute the (potentially symbolic) offset in bytes for this index.
      if (StructType *STy = GTI.getStructTypeOrNull()) {
        // For a struct, add the member offset.
        unsigned FieldNo = cast<ConstantInt>(Index)->getZExtValue();
        if (FieldNo == 0)
          continue;

        Decomposed.Offset += DL.getStructLayout(STy)->getElementOffset(FieldNo);
        continue;
      }
      // For an array/pointer, add the element offset, explicitly scaled.
      if (const ConstantInt *CIdx = dyn_cast<ConstantInt>(Index)) {
        if (CIdx->isZero())
          continue;

        // Don't attempt to analyze GEPs if the scalable index is not zero.
        TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
        if (AllocTypeSize.isScalable()) {
          Decomposed.Base = V;
          return Decomposed;
        }

        Decomposed.Offset += AllocTypeSize.getFixedValue() *
                             CIdx->getValue().sextOrTrunc(MaxIndexSize);
        continue;
      }
      TypeSize AllocTypeSize = DL.getTypeAllocSize(GTI.getIndexedType());
      if (AllocTypeSize.isScalable()) {
        Decomposed.Base = V;
        return Decomposed;
      }

      GepHasConstantOffset = false;

      // If the integer type is smaller than the index size, it is implicitly
      // sign extended or truncated to index size.
      unsigned Width = Index->getType()->getIntegerBitWidth();
      unsigned SExtBits = IndexSize > Width ? IndexSize - Width : 0;
      unsigned TruncBits = IndexSize < Width ? Width - IndexSize : 0;
      LinearExpression LE = GetLinearExpression(
          CastedValue(Index, 0, SExtBits, TruncBits), DL, 0, AC, DT);

      // Scale by the type size.
      unsigned TypeSize = AllocTypeSize.getFixedValue();
      LE = LE.mul(APInt(IndexSize, TypeSize), GEPOp->isInBounds());
      Decomposed.Offset += LE.Offset.sext(MaxIndexSize);
      APInt Scale = LE.Scale.sext(MaxIndexSize);

      // If we already had an occurrence of this index variable, merge this
      // scale into it.  For example, we want to handle:
      //   A[x][x] -> x*16 + x*4 -> x*20
      // This also ensures that 'x' only appears in the index list once.
      for (unsigned i = 0, e = Decomposed.VarIndices.size(); i != e; ++i) {
        if (Decomposed.VarIndices[i].Val.V == LE.Val.V &&
            Decomposed.VarIndices[i].Val.hasSameCastsAs(LE.Val)) {
          Scale += Decomposed.VarIndices[i].Scale;
          Decomposed.VarIndices.erase(Decomposed.VarIndices.begin() + i);
          break;
        }
      }

      // Make sure that we have a scale that makes sense for this target's
      // index size.
      Scale = adjustToIndexSize(Scale, IndexSize);

      if (!!Scale) {
        VariableGEPIndex Entry = {LE.Val, Scale, CxtI, LE.IsNSW};
        Decomposed.VarIndices.push_back(Entry);
      }
    }

    // Take care of wrap-arounds
    if (GepHasConstantOffset)
      Decomposed.Offset = adjustToIndexSize(Decomposed.Offset, IndexSize);

    // Analyze the base pointer next.
    V = GEPOp->getOperand(0);
  } while (--MaxLookup);

  // If the chain of expressions is too deep, just return early.
  Decomposed.Base = V;
  SearchLimitReached++;
  return Decomposed;
}
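
// Illustrative result (assuming a 64-bit index size): for
//   %p = getelementptr inbounds [10 x i32], ptr %base, i64 0, i64 %i
// the decomposition is Base = %base, Offset = 0, and a single variable index
// {Val = %i, Scale = 4}, since each i32 element occupies 4 bytes.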

ModRefInfo BasicAAResult::getModRefInfoMask(const MemoryLocation &Loc,
                                            AAQueryInfo &AAQI,
                                            bool IgnoreLocals) {
  assert(Visited.empty() && "Visited must be cleared after use!");
  auto _ = make_scope_exit([&] { Visited.clear(); });

  unsigned MaxLookup = 8;
  SmallVector<const Value *, 16> Worklist;
  Worklist.push_back(Loc.Ptr);
  ModRefInfo Result = ModRefInfo::NoModRef;

  do {
    const Value *V = getUnderlyingObject(Worklist.pop_back_val());
    if (!Visited.insert(V).second)
      continue;

    // Ignore allocas if we were instructed to do so.
    if (IgnoreLocals && isa<AllocaInst>(V))
      continue;

    // If the location points to memory that is known to be invariant for
    // the life of the underlying SSA value, then we can exclude Mod from
    // the set of valid memory effects.
    //
    // An argument that is marked readonly and noalias is known to be
    // invariant while that function is executing.
    if (const Argument *Arg = dyn_cast<Argument>(V)) {
      if (Arg->hasNoAliasAttr() && Arg->onlyReadsMemory()) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
    }

    // A global constant can't be mutated.
    if (const GlobalVariable *GV = dyn_cast<GlobalVariable>(V)) {
      // Note: this doesn't require GV to be "ODR" because it isn't legal for a
      // global to be marked constant in some modules and non-constant in
      // others.  GV may even be a declaration, not a definition.
      if (!GV->isConstant())
        return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
      continue;
    }

    // If both select values point to local memory, then so does the select.
    if (const SelectInst *SI = dyn_cast<SelectInst>(V)) {
      Worklist.push_back(SI->getTrueValue());
      Worklist.push_back(SI->getFalseValue());
      continue;
    }

    // If all values incoming to a phi node point to local memory, then so does
    // the phi.
    if (const PHINode *PN = dyn_cast<PHINode>(V)) {
      // Don't bother inspecting phi nodes with many operands.
      if (PN->getNumIncomingValues() > MaxLookup)
        return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
      append_range(Worklist, PN->incoming_values());
      continue;
    }

    // Otherwise be conservative.
    return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);
  } while (!Worklist.empty() && --MaxLookup);

  // If we hit the maximum number of instructions to examine, be conservative.
  if (!Worklist.empty())
    return AAResultBase::getModRefInfoMask(Loc, AAQI, IgnoreLocals);

  return Result;
}
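
// For example, a location rooted at "@g = constant i32 7" keeps the mask at
// NoModRef (constant memory can neither be modified nor carry an observable
// dependence), while one rooted at a readonly+noalias argument contributes
// only Ref: it may be read while the function executes, but never written.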

static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}

/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}

/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  }

  return F->getMemoryEffects();
}

ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return AAResultBase::getArgModRefInfo(Call, ArgIdx);
}

#ifndef NDEBUG
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {

  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
#endif

AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}

/// Checks to see if the specified callsite can clobber the specified memory
/// object.
///
/// Since we only look at local properties of this function, we really can't
/// say much about this query.  We do, however, use simple "address taken"
/// analysis on local objects.
ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call,
                                        const MemoryLocation &Loc,
                                        AAQueryInfo &AAQI) {
  assert(notDifferentParent(Call, Loc.Ptr) &&
         "AliasAnalysis query involving multiple functions!");

  const Value *Object = getUnderlyingObject(Loc.Ptr);

  // Calls marked 'tail' cannot read or write allocas from the current frame
  // because the current frame might be destroyed by the time they run.
  // However, a tail call may use an alloca with byval. Calling with byval
  // copies the contents of the alloca into argument registers or stack slots,
  // so there is no lifetime issue.
  if (isa<AllocaInst>(Object))
    if (const CallInst *CI = dyn_cast<CallInst>(Call))
      if (CI->isTailCall() &&
          !CI->getAttributes().hasAttrSomewhere(Attribute::ByVal))
        return ModRefInfo::NoModRef;

  // Stack restore is able to modify unescaped dynamic allocas. Assume it may
  // modify them even though the alloca is not escaped.
  if (auto *AI = dyn_cast<AllocaInst>(Object))
    if (!AI->isStaticAlloca() && isIntrinsicCall(Call, Intrinsic::stackrestore))
      return ModRefInfo::Mod;

  // If the pointer is to a locally allocated object that does not escape,
  // then the call cannot mod/ref the pointer unless the call takes the pointer
  // as an argument, and itself doesn't capture it.
  if (!isa<Constant>(Object) && Call != Object &&
      AAQI.CI->isNotCapturedBeforeOrAt(Object, Call)) {

    // Optimistically assume that call doesn't touch Object and check this
    // assumption in the following loop.
    ModRefInfo Result = ModRefInfo::NoModRef;

    unsigned OperandNo = 0;
    for (auto CI = Call->data_operands_begin(), CE = Call->data_operands_end();
         CI != CE; ++CI, ++OperandNo) {
      // Only look at the no-capture or byval pointer arguments.  If this
      // pointer were passed to arguments that were neither of these, then it
      // couldn't be no-capture.
      if (!(*CI)->getType()->isPointerTy() ||
          (!Call->doesNotCapture(OperandNo) && OperandNo < Call->arg_size() &&
           !Call->isByValArgument(OperandNo)))
        continue;

      // Call doesn't access memory through this operand, so we don't care
      // if it aliases with Object.
      if (Call->doesNotAccessMemory(OperandNo))
        continue;

      // If this is a no-capture pointer argument, see if we can tell that it
      // is impossible to alias the pointer we're checking.
      AliasResult AR =
          AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(*CI),
                         MemoryLocation::getBeforeOrAfter(Object), AAQI);
      // Operand doesn't alias 'Object', continue looking for other aliases
      if (AR == AliasResult::NoAlias)
        continue;
      // Operand aliases 'Object', but call doesn't modify it. Strengthen
      // initial assumption and keep looking in case there are more aliases.
      if (Call->onlyReadsMemory(OperandNo)) {
        Result |= ModRefInfo::Ref;
        continue;
      }
      // Operand aliases 'Object' but call only writes into it.
      if (Call->onlyWritesMemory(OperandNo)) {
        Result |= ModRefInfo::Mod;
        continue;
      }
      // This operand aliases 'Object' and call reads and writes into it.
      // Setting ModRef will not yield an early return below, MustAlias is not
      // used further.
      Result = ModRefInfo::ModRef;
      break;
    }

    // Early return if we improved mod ref information
    if (!isModAndRefSet(Result))
      return Result;
  }

  // If the call is malloc/calloc like, we can assume that it doesn't
  // modify any IR visible value.  This is only valid because we assume these
  // routines do not read values visible in the IR.  TODO: Consider special
  // casing realloc and strdup routines which access only their arguments as
  // well.  Or alternatively, replace all of this with inaccessiblememonly once
  // that's implemented fully.
  if (isMallocOrCallocLikeFn(Call, &TLI)) {
    // Be conservative if the accessed pointer may alias the allocation -
    // fallback to the generic handling below.
    if (AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(Call), Loc, AAQI) ==
        AliasResult::NoAlias)
      return ModRefInfo::NoModRef;
  }

  // Like assumes, invariant.start intrinsics were also marked as arbitrarily
  // writing so that proper control dependencies are maintained but they never
  // mod any particular memory location visible to the IR.
  // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
  // intrinsic is now modeled as reading memory. This prevents hoisting the
  // invariant.start intrinsic over stores. Consider:
  // *ptr = 40;
  // *ptr = 50;
  // invariant_start(ptr)
  // int val = *ptr;
  // print(val);
  //
  // This cannot be transformed to:
  //
  // *ptr = 40;
  // invariant_start(ptr)
  // *ptr = 50;
  // int val = *ptr;
  // print(val);
  //
  // The transformation will cause the second store to be ignored (based on
  // rules of invariant.start) and print 40, while the first program always
  // prints 50.
  if (isIntrinsicCall(Call, Intrinsic::invariant_start))
    return ModRefInfo::Ref;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call, Loc, AAQI);
}

ModRefInfo BasicAAResult::getModRefInfo(const CallBase *Call1,
                                        const CallBase *Call2,
                                        AAQueryInfo &AAQI) {
  // Guard intrinsics are marked as arbitrarily writing so that proper control
  // dependencies are maintained but they never mod any particular memory
  // location.
  //
  // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
  // heap state at the point the guard is issued needs to be consistent in case
  // the guard invokes the "deopt" continuation.

  // NB! This function is *not* commutative, so we special case two
  // possibilities for guard intrinsics.

  if (isIntrinsicCall(Call1, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call2, AAQI).getModRef())
               ? ModRefInfo::Ref
               : ModRefInfo::NoModRef;

  if (isIntrinsicCall(Call2, Intrinsic::experimental_guard))
    return isModSet(getMemoryEffects(Call1, AAQI).getModRef())
               ? ModRefInfo::Mod
               : ModRefInfo::NoModRef;

  // The AAResultBase base class has some smarts, let's use them.
  return AAResultBase::getModRefInfo(Call1, Call2, AAQI);
}

/// Return true if we know V to be the base address of the corresponding memory
/// object.  This implies that any address less than V must be out of bounds
/// for the underlying object.  Note that just being isIdentifiedObject() is
/// not enough - For example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers.  Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}

/// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
/// another pointer.
///
/// We know that V1 is a GEP, but we don't know anything about V2.
/// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
/// V2.
AliasResult BasicAAResult::aliasGEP(
    const GEPOperator *GEP1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    const Value *UnderlyingV1, const Value *UnderlyingV2, AAQueryInfo &AAQI) {
  if (!V1Size.hasValue() && !V2Size.hasValue()) {
    // TODO: This limitation exists for compile-time reasons. Relax it if we
    // can avoid exponential pathological cases.
    if (!isa<GEPOperator>(V2))
      return AliasResult::MayAlias;

    // If both accesses have unknown size, we can only check whether the base
    // objects don't alias.
    AliasResult BaseAlias =
        AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1),
                       MemoryLocation::getBeforeOrAfter(UnderlyingV2), AAQI);
    return BaseAlias == AliasResult::NoAlias ? AliasResult::NoAlias
                                             : AliasResult::MayAlias;
  }

  DecomposedGEP DecompGEP1 = DecomposeGEPExpression(GEP1, DL, &AC, DT);
  DecomposedGEP DecompGEP2 = DecomposeGEPExpression(V2, DL, &AC, DT);

  // Bail if we were not able to decompose anything.
  if (DecompGEP1.Base == GEP1 && DecompGEP2.Base == V2)
    return AliasResult::MayAlias;

  // Subtract the GEP2 pointer from the GEP1 pointer to find out their
  // symbolic difference.
  subtractDecomposedGEPs(DecompGEP1, DecompGEP2, AAQI);

  // If an inbounds GEP would have to start from an out of bounds address
  // for the two to alias, then we can assume noalias.
  if (*DecompGEP1.InBounds && DecompGEP1.VarIndices.empty() &&
      V2Size.hasValue() && DecompGEP1.Offset.sge(V2Size.getValue()) &&
      isBaseOfObject(DecompGEP2.Base))
    return AliasResult::NoAlias;

  if (isa<GEPOperator>(V2)) {
    // Symmetric case to above.
    if (*DecompGEP2.InBounds && DecompGEP1.VarIndices.empty() &&
        V1Size.hasValue() && DecompGEP1.Offset.sle(-V1Size.getValue()) &&
        isBaseOfObject(DecompGEP1.Base))
      return AliasResult::NoAlias;
  }

  // For GEPs with identical offsets, we can preserve the size and AAInfo
  // when performing the alias check on the underlying objects.
  if (DecompGEP1.Offset == 0 && DecompGEP1.VarIndices.empty())
    return AAQI.AAR.alias(MemoryLocation(DecompGEP1.Base, V1Size),
                          MemoryLocation(DecompGEP2.Base, V2Size), AAQI);

  // Do the base pointers alias?
  AliasResult BaseAlias =
      AAQI.AAR.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1.Base),
                     MemoryLocation::getBeforeOrAfter(DecompGEP2.Base), AAQI);

  // If we get a No or May, then return it immediately, no amount of analysis
  // will improve this situation.
  if (BaseAlias != AliasResult::MustAlias) {
    assert(BaseAlias == AliasResult::NoAlias ||
           BaseAlias == AliasResult::MayAlias);
    return BaseAlias;
  }

  // If there is a constant difference between the pointers, but the difference
  // is less than the size of the associated memory object, then we know
  // that the objects are partially overlapping.  If the difference is
  // greater, we know they do not overlap.
  if (DecompGEP1.VarIndices.empty()) {
    APInt &Off = DecompGEP1.Offset;

    // Initialize for Off >= 0 (V2 <= GEP1) case.
    const Value *LeftPtr = V2;
    const Value *RightPtr = GEP1;
    LocationSize VLeftSize = V2Size;
    LocationSize VRightSize = V1Size;
    const bool Swapped = Off.isNegative();

    if (Swapped) {
      // Swap if we have the situation where:
      // +                +
      // | BaseOffset     |
      // ---------------->|
      // |-->V1Size       |-------> V2Size
      // GEP1             V2
      std::swap(LeftPtr, RightPtr);
      std::swap(VLeftSize, VRightSize);
      Off = -Off;
    }

    if (!VLeftSize.hasValue())
      return AliasResult::MayAlias;

    const uint64_t LSize = VLeftSize.getValue();
    if (Off.ult(LSize)) {
      // Conservatively drop processing if a phi was visited and/or offset is
      // too big.
      AliasResult AR = AliasResult::PartialAlias;
      if (VRightSize.hasValue() && Off.ule(INT32_MAX) &&
          (Off + VRightSize.getValue()).ule(LSize)) {
        // Memory referenced by right pointer is nested. Save the offset in
        // cache. Note that the offset was originally estimated as GEP1-V2,
        // but AliasResult stores the shift that represents GEP1+Offset=V2.
        AR.setOffset(-Off.getSExtValue());
        AR.swap(Swapped);
      }
      return AR;
    }
    return AliasResult::NoAlias;
  }

  // We need to know both access sizes for all the following heuristics.
  if (!V1Size.hasValue() || !V2Size.hasValue())
    return AliasResult::MayAlias;

  APInt GCD;
  ConstantRange OffsetRange = ConstantRange(DecompGEP1.Offset);
  for (unsigned i = 0, e = DecompGEP1.VarIndices.size(); i != e; ++i) {
    const VariableGEPIndex &Index = DecompGEP1.VarIndices[i];
    const APInt &Scale = Index.Scale;
    APInt ScaleForGCD = Scale;
    if (!Index.IsNSW)
      ScaleForGCD =
          APInt::getOneBitSet(Scale.getBitWidth(), Scale.countr_zero());

    if (i == 0)
      GCD = ScaleForGCD.abs();
    else
      GCD = APIntOps::GreatestCommonDivisor(GCD, ScaleForGCD.abs());

    ConstantRange CR = computeConstantRange(Index.Val.V, /* ForSigned */ false,
                                            true, &AC, Index.CxtI);
    KnownBits Known =
        computeKnownBits(Index.Val.V, DL, 0, &AC, Index.CxtI, DT);
    CR = CR.intersectWith(
        ConstantRange::fromKnownBits(Known, /* Signed */ true),
        ConstantRange::Signed);
    CR = Index.Val.evaluateWith(CR).sextOrTrunc(OffsetRange.getBitWidth());

    assert(OffsetRange.getBitWidth() == Scale.getBitWidth() &&
           "Bit widths are normalized to MaxIndexSize");
    if (Index.IsNSW)
      OffsetRange = OffsetRange.add(CR.smul_sat(ConstantRange(Scale)));
    else
      OffsetRange = OffsetRange.add(CR.smul_fast(ConstantRange(Scale)));
  }

  // We now have accesses at two offsets from the same base:
  //  1. (...)*GCD + DecompGEP1.Offset with size V1Size
  //  2. 0 with size V2Size
  // Using arithmetic modulo GCD, the accesses are at
  // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
  // into the range [V2Size..GCD), then we know they cannot overlap.
  APInt ModOffset = DecompGEP1.Offset.srem(GCD);
  if (ModOffset.isNegative())
    ModOffset += GCD; // We want mod, not rem.
  if (ModOffset.uge(V2Size.getValue()) &&
      (GCD - ModOffset).uge(V1Size.getValue()))
    return AliasResult::NoAlias;
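
  // Worked instance of the reasoning above: if GCD = 16, DecompGEP1.Offset = 4
  // and both sizes are 4, then modulo 16 the accesses cover [4, 8) and [0, 4);
  // ModOffset (4) >= V2Size (4) and GCD - ModOffset (12) >= V1Size (4), so the
  // accesses cannot overlap.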

  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
  unsigned BW = OffsetRange.getBitWidth();
  ConstantRange Range1 = OffsetRange.add(
      ConstantRange(APInt(BW, 0), APInt(BW, V1Size.getValue())));
  ConstantRange Range2 =
      ConstantRange(APInt(BW, 0), APInt(BW, V2Size.getValue()));
  if (Range1.intersectWith(Range2).isEmptySet())
    return AliasResult::NoAlias;

  // Try to determine the range of values for VarIndex such that
  // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
  std::optional<APInt> MinAbsVarIndex;
  if (DecompGEP1.VarIndices.size() == 1) {
    // VarIndex = Scale*V.
    const VariableGEPIndex &Var = DecompGEP1.VarIndices[0];
    if (Var.Val.TruncBits == 0 &&
        isKnownNonZero(Var.Val.V, DL, 0, &AC, Var.CxtI, DT)) {
      // If V != 0, then abs(VarIndex) > 0.
      MinAbsVarIndex = APInt(Var.Scale.getBitWidth(), 1);

      // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
      // potentially wrapping math.
      auto MultiplyByScaleNoWrap = [](const VariableGEPIndex &Var) {
        if (Var.IsNSW)
          return true;

        int ValOrigBW = Var.Val.V->getType()->getPrimitiveSizeInBits();
        // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
        // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
        // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
        int MaxScaleValueBW = Var.Val.getBitWidth() - ValOrigBW;
        if (MaxScaleValueBW <= 0)
          return false;
        return Var.Scale.ule(
            APInt::getMaxValue(MaxScaleValueBW).zext(Var.Scale.getBitWidth()));
      };
      // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
      // presence of potentially wrapping math.
      if (MultiplyByScaleNoWrap(Var)) {
        // If V != 0 then abs(VarIndex) >= abs(Scale).
        MinAbsVarIndex = Var.Scale.abs();
      }
    }
  } else if (DecompGEP1.VarIndices.size() == 2) {
    // VarIndex = Scale*V0 + (-Scale)*V1.
    // If V0 != V1 then abs(VarIndex) >= abs(Scale).
    // Check that MayBeCrossIteration is false, to avoid reasoning about
    // inequality of values across loop iterations.
    const VariableGEPIndex &Var0 = DecompGEP1.VarIndices[0];
    const VariableGEPIndex &Var1 = DecompGEP1.VarIndices[1];
    if (Var0.Scale == -Var1.Scale && Var0.Val.TruncBits == 0 &&
        Var0.Val.hasSameCastsAs(Var1.Val) && !AAQI.MayBeCrossIteration &&
        isKnownNonEqual(Var0.Val.V, Var1.Val.V, DL, &AC, /* CxtI */ nullptr,
                        DT))
      MinAbsVarIndex = Var0.Scale.abs();
  }

  if (MinAbsVarIndex) {
    // The constant offset will have added at least +/-MinAbsVarIndex to it.
    APInt OffsetLo = DecompGEP1.Offset - *MinAbsVarIndex;
    APInt OffsetHi = DecompGEP1.Offset + *MinAbsVarIndex;
    // We know that Offset <= OffsetLo || Offset >= OffsetHi
    if (OffsetLo.isNegative() && (-OffsetLo).uge(V1Size.getValue()) &&
        OffsetHi.isNonNegative() && OffsetHi.uge(V2Size.getValue()))
      return AliasResult::NoAlias;
  }

  if (constantOffsetHeuristic(DecompGEP1, V1Size, V2Size, &AC, DT, AAQI))
    return AliasResult::NoAlias;

  // Statically, we can see that the base objects are the same, but the
  // pointers have dynamic offsets which we can't resolve. And none of our
  // little tricks above worked.
  return AliasResult::MayAlias;
}

static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}

/// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
/// against another.
AliasResult
BasicAAResult::aliasSelect(const SelectInst *SI, LocationSize SISize,
                           const Value *V2, LocationSize V2Size,
                           AAQueryInfo &AAQI) {
  // If the values are Selects with the same condition, we can do a more
  // precise check: just check for aliases between the values on corresponding
  // arms.
  if (const SelectInst *SI2 = dyn_cast<SelectInst>(V2))
    if (isValueEqualInPotentialCycles(SI->getCondition(), SI2->getCondition(),
                                      AAQI)) {
      AliasResult Alias =
          AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                         MemoryLocation(SI2->getTrueValue(), V2Size), AAQI);
      if (Alias == AliasResult::MayAlias)
        return AliasResult::MayAlias;
      AliasResult ThisAlias =
          AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                         MemoryLocation(SI2->getFalseValue(), V2Size), AAQI);
      return MergeAliasResults(ThisAlias, Alias);
    }

  // If both arms of the Select node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(SI->getTrueValue(), SISize),
                                     MemoryLocation(V2, V2Size), AAQI);
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;

  AliasResult ThisAlias =
      AAQI.AAR.alias(MemoryLocation(SI->getFalseValue(), SISize),
                     MemoryLocation(V2, V2Size), AAQI);
  return MergeAliasResults(ThisAlias, Alias);
}

/// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
/// another.
AliasResult BasicAAResult::aliasPHI(const PHINode *PN, LocationSize PNSize,
                                    const Value *V2, LocationSize V2Size,
                                    AAQueryInfo &AAQI) {
  if (!PN->getNumIncomingValues())
    return AliasResult::NoAlias;
  // If the values are PHIs in the same block, we can do a more precise
  // as well as efficient check: just check for aliases between the values
  // on corresponding edges.
  if (const PHINode *PN2 = dyn_cast<PHINode>(V2))
    if (PN2->getParent() == PN->getParent()) {
      std::optional<AliasResult> Alias;
      for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
        AliasResult ThisAlias = AAQI.AAR.alias(
            MemoryLocation(PN->getIncomingValue(i), PNSize),
            MemoryLocation(
                PN2->getIncomingValueForBlock(PN->getIncomingBlock(i)), V2Size),
            AAQI);
        if (Alias)
          *Alias = MergeAliasResults(*Alias, ThisAlias);
        else
          Alias = ThisAlias;
        if (*Alias == AliasResult::MayAlias)
          break;
      }
      return *Alias;
    }

  SmallVector<Value *, 4> V1Srcs;
  // If a phi operand recurses back to the phi, we can still determine NoAlias
  // if we don't alias the underlying objects of the other phi operands, as we
  // know that the recursive phi needs to be based on them in some way.
  bool isRecursive = false;
  auto CheckForRecPhi = [&](Value *PV) {
    if (!EnableRecPhiAnalysis)
      return false;
    if (getUnderlyingObject(PV) == PN) {
      isRecursive = true;
      return true;
    }
    return false;
  };
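
  // The pattern this recognizes is the classic pointer induction variable
  // (illustrative IR):
  //   loop:
  //     %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
  //     %p.next = getelementptr inbounds i8, ptr %p, i64 1
  // %p.next recurses back to %p, so NoAlias can still be established from the
  // remaining, non-recursive incoming values.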

  SmallPtrSet<Value *, 4> UniqueSrc;
  Value *OnePhi = nullptr;
  for (Value *PV1 : PN->incoming_values()) {
    // Skip the phi itself being the incoming value.
    if (PV1 == PN)
      continue;

    if (isa<PHINode>(PV1)) {
      if (OnePhi && OnePhi != PV1) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input.  It is important
        // that we handle the single phi case as that lets us handle LCSSA
        // phi nodes and (combined with the recursive phi handling) simple
        // pointer induction variable patterns.
        return AliasResult::MayAlias;
      }
      OnePhi = PV1;
    }

    if (CheckForRecPhi(PV1))
      continue;

    if (UniqueSrc.insert(PV1).second)
      V1Srcs.push_back(PV1);
  }

  if (OnePhi && UniqueSrc.size() > 1)
    // Out of an abundance of caution, allow only the trivial lcssa and
    // recursive phi cases.
    return AliasResult::MayAlias;

  // If V1Srcs is empty then that means that the phi has no underlying non-phi
  // value. This should only be possible in blocks unreachable from the entry
  // block, but return MayAlias just in case.
  if (V1Srcs.empty())
    return AliasResult::MayAlias;

  // If this PHI node is recursive, indicate that the pointer may be moved
  // across iterations. We can only prove NoAlias if different underlying
  // objects are involved.
  if (isRecursive)
    PNSize = LocationSize::beforeOrAfterPointer();

  // In the recursive alias queries below, we may compare values from two
  // different loop iterations.
  SaveAndRestore SavedMayBeCrossIteration(AAQI.MayBeCrossIteration, true);

  AliasResult Alias = AAQI.AAR.alias(MemoryLocation(V1Srcs[0], PNSize),
                                     MemoryLocation(V2, V2Size), AAQI);

  // Early exit if the check of the first PHI source against V2 is MayAlias.
  // Other results are not possible.
  if (Alias == AliasResult::MayAlias)
    return AliasResult::MayAlias;
  // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
  // remain valid for all elements, so conservatively return MayAlias.
  if (isRecursive && Alias != AliasResult::NoAlias)
    return AliasResult::MayAlias;

  // If all sources of the PHI node NoAlias or MustAlias V2, then returns
  // NoAlias / MustAlias. Otherwise, returns MayAlias.
  for (unsigned i = 1, e = V1Srcs.size(); i != e; ++i) {
    Value *V = V1Srcs[i];

    AliasResult ThisAlias = AAQI.AAR.alias(
        MemoryLocation(V, PNSize), MemoryLocation(V2, V2Size), AAQI);
    Alias = MergeAliasResults(ThisAlias, Alias);
    if (Alias == AliasResult::MayAlias)
      break;
  }

  return Alias;
}

/// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
/// array references.
AliasResult BasicAAResult::aliasCheck(const Value *V1, LocationSize V1Size,
                                      const Value *V2, LocationSize V2Size,
                                      AAQueryInfo &AAQI,
                                      const Instruction *CtxI) {
  // If either of the memory references is empty, it doesn't matter what the
  // pointer values are.
  if (V1Size.isZero() || V2Size.isZero())
    return AliasResult::NoAlias;

  // Strip off any casts if they exist.
  V1 = V1->stripPointerCastsForAliasAnalysis();
  V2 = V2->stripPointerCastsForAliasAnalysis();

  // If V1 or V2 is undef, the result is NoAlias because we can always pick a
  // value for undef that aliases nothing in the program.
  if (isa<UndefValue>(V1) || isa<UndefValue>(V2))
    return AliasResult::NoAlias;

  // Are we checking for alias of the same value?
  // Because we look 'through' phi nodes, we could look at "Value" pointers
  // from different iterations. We must therefore make sure that this is not
  // the case. The function isValueEqualInPotentialCycles ensures that this
  // cannot happen by looking at the visited phi nodes and making sure they
  // cannot reach the value.
  if (isValueEqualInPotentialCycles(V1, V2, AAQI))
    return AliasResult::MustAlias;

  if (!V1->getType()->isPointerTy() || !V2->getType()->isPointerTy())
    return AliasResult::NoAlias; // Scalars cannot alias each other

  // Figure out what objects these things are pointing to if we can.
  const Value *O1 = getUnderlyingObject(V1, MaxLookupSearchDepth);
  const Value *O2 = getUnderlyingObject(V2, MaxLookupSearchDepth);

  // Null values in the default address space don't point to any object, so
  // they don't alias any other pointer.
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O1))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;
  if (const ConstantPointerNull *CPN = dyn_cast<ConstantPointerNull>(O2))
    if (!NullPointerIsDefined(&F, CPN->getType()->getAddressSpace()))
      return AliasResult::NoAlias;

  if (O1 != O2) {
    // If V1/V2 point to two different objects, we know that we have no alias.
    if (isIdentifiedObject(O1) && isIdentifiedObject(O2))
      return AliasResult::NoAlias;

    // Constant pointers can't alias with non-const isIdentifiedObject objects.
    if ((isa<Constant>(O1) && isIdentifiedObject(O2) && !isa<Constant>(O2)) ||
        (isa<Constant>(O2) && isIdentifiedObject(O1) && !isa<Constant>(O1)))
      return AliasResult::NoAlias;

    // Function arguments can't alias with things that are known to be
    // unambiguously identified at the function level.
    if ((isa<Argument>(O1) && isIdentifiedFunctionLocal(O2)) ||
        (isa<Argument>(O2) && isIdentifiedFunctionLocal(O1)))
      return AliasResult::NoAlias;

    // If one pointer is the result of a call/invoke or load and the other is
    // a non-escaping local object within the same function, then we know the
    // object couldn't escape to a point where the call could return it.
    //
    // Note that if the pointers are in different functions, there are a
    // variety of complications. A call with a nocapture argument may still
    // temporarily store the nocapture argument's value in a temporary memory
    // location if that memory location doesn't escape. Or it may pass a
    // nocapture value to other functions as long as they don't capture it.
    if (isEscapeSource(O1) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O2, cast<Instruction>(O1)))
      return AliasResult::NoAlias;
    if (isEscapeSource(O2) &&
        AAQI.CI->isNotCapturedBeforeOrAt(O1, cast<Instruction>(O2)))
      return AliasResult::NoAlias;
  }

  // If the size of one access is larger than the entire object on the other
  // side, then we know such behavior is undefined and can assume no alias.
  bool NullIsValidLocation = NullPointerIsDefined(&F);
  if ((isObjectSmallerThan(
          O2, getMinimalExtentFrom(*V1, V1Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)) ||
      (isObjectSmallerThan(
          O1, getMinimalExtentFrom(*V2, V2Size, DL, NullIsValidLocation), DL,
          TLI, NullIsValidLocation)))
    return AliasResult::NoAlias;

  if (CtxI && EnableSeparateStorageAnalysis) {
    for (auto &AssumeVH : AC.assumptions()) {
      if (!AssumeVH)
        continue;

      AssumeInst *Assume = cast<AssumeInst>(AssumeVH);

      for (unsigned Idx = 0; Idx < Assume->getNumOperandBundles(); Idx++) {
        OperandBundleUse OBU = Assume->getOperandBundleAt(Idx);
        if (OBU.getTagName() == "separate_storage") {
          assert(OBU.Inputs.size() == 2);
          const Value *Hint1 = OBU.Inputs[0].get();
          const Value *Hint2 = OBU.Inputs[1].get();
          // This is often a no-op; instcombine rewrites this for us. No-op
          // getUnderlyingObject calls are fast, though.
          const Value *HintO1 = getUnderlyingObject(Hint1);
          const Value *HintO2 = getUnderlyingObject(Hint2);

          if (((O1 == HintO1 && O2 == HintO2) ||
               (O1 == HintO2 && O2 == HintO1)) &&
              isValidAssumeForContext(Assume, CtxI, DT))
            return AliasResult::NoAlias;
        }
      }
    }
  }
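
  // An assume that triggers the check above looks like (illustrative IR):
  //   call void @llvm.assume(i1 true) ["separate_storage"(ptr %a, ptr %b)]
  // If %a and %b are the underlying objects of the two query pointers (in
  // either order) and the assume is valid at CtxI, the locations cannot alias.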

  // If one of the accesses may be before the accessed pointer, canonicalize
  // this by using unknown after-pointer sizes for both accesses. This is
  // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
  // disjoint. We do this so that the rest of BasicAA does not have to deal
  // with accesses before the base pointer, and to improve cache utilization by
  // merging equivalent states.
  if (V1Size.mayBeBeforePointer() || V2Size.mayBeBeforePointer()) {
    V1Size = LocationSize::afterPointer();
    V2Size = LocationSize::afterPointer();
  }

  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;

  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }
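
  // Note (illustrative): the cache key always stores the pair in pointer
  // order, with Swapped recording whether the caller's order was reversed.
  // Direction-sensitive results must therefore be re-oriented on the way
  // out; e.g. a PartialAlias result carrying a concrete offset between the
  // two locations depends on operand order, and Result.swap(Swapped) flips
  // it back to match the original query.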

  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);

  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;

  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;

  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;

  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());

  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);

  return Result;
}

AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    Result.swap();
    if (Result != AliasResult::MayAlias)
      return Result;
  }

  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}

/// Check whether two Values can be considered equivalent.
///
/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because
/// we are looking through phi nodes, that is we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a cycle.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;

  // Check whether the instruction is part of a cycle, by checking whether the
  // block can (non-trivially) reach itself.
  BasicBlock *BB = const_cast<BasicBlock *>(Inst->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return !Succs.empty() &&
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT);
}
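
// For illustration of the cross-iteration check above (hypothetical IR):
// with MayBeCrossIteration set, the same Value may denote different
// addresses on different loop iterations:
//
//   loop:
//     %p = phi ptr [ %base, %entry ], [ %p.next, %loop ]
//     %p.next = getelementptr i8, ptr %p, i64 4
//     br i1 %cond, label %loop, label %exit
//
// %p.next's block can (non-trivially) reach itself, so two occurrences of
// %p.next may belong to different iterations, and the function above
// conservatively returns false for it.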

/// Computes the symbolic difference between two de-composed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;

      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }

    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, -Src.Scale, Src.CxtI, Src.IsNSW};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}
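
// Worked example for the subtraction above (hypothetical values): taking the
// difference of the decompositions of (%base + 1*%i + 4) and (%base + 1*%i):
//
//   DestGEP = { Offset = 4, VarIndices = [ {%i, Scale = 1} ] }
//   SrcGEP  = { Offset = 0, VarIndices = [ {%i, Scale = 1} ] }
//
// The %i entries match with equal scales, so the Dest entry is erased,
// leaving { Offset = 4, VarIndices = [] }, i.e. a constant distance of
// 4 bytes between the two pointers.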

bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();

  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      Var0.Scale != -Var1.Scale ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;

  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.
  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;

  // We have a hit - Var0 and Var1 only differ by a constant offset!
  //
  // If we've been sext'ed then zext'd, the maximum difference between Var0
  // and Var1 is possible to calculate, but we're just interested in the
  // absolute minimum difference between the two. The minimum distance may
  // occur due to wrapping; consider "add i3 %i, 5": if %i == 7 then
  // 7 + 5 mod 8 == 4, and so the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();

  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}
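
// Worked example for the heuristic above (hypothetical values): suppose the
// two variable indices are zext(%x + 1) with Scale 4 and zext(%x) with
// Scale -4. Stripping the extensions gives E0 = %x + 1 and E1 = %x, so
// MinDiff = 1 and MinDiffBytes = 1 * |4| = 4. With GEP.Offset == 0, accesses
// of at most 4 bytes on both sides fit in the gap, and the heuristic
// permits a NoAlias answer.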

//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;

BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
}

BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}

INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)

FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}

bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}

void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}

BasicAAResult llvm::createLegacyPMBasicAAResult(Pass &P, Function &F) {
  return BasicAAResult(
      F.getParent()->getDataLayout(), F,
      P.getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F),
      P.getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F));
}