//===- BasicAliasAnalysis.cpp - Stateless Alias Analysis Impl -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the primary stateless implementation of the
// Alias Analysis interface that implements identities (two different
// globals cannot alias, etc), but does no stateful analysis.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CFG.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace llvm;
#define DEBUG_TYPE "basicaa"

/// Enable analysis of recursive PHI nodes.
static cl::opt<bool> EnableRecPhiAnalysis("basic-aa-recphi", cl::Hidden,
                                          cl::init(true));

static cl::opt<bool> EnableSeparateStorageAnalysis("basic-aa-separate-storage",
                                                   cl::Hidden, cl::init(false));
/// SearchLimitReached / SearchTimes shows how often the limit to decompose
/// GEPs is reached. It will affect the precision of basic alias analysis.
STATISTIC(SearchLimitReached, "Number of times the limit to "
                              "decompose GEPs is reached");
STATISTIC(SearchTimes, "Number of times a GEP is decomposed");

// The max limit of the search depth in DecomposeGEPExpression() and
// getUnderlyingObject().
static const unsigned MaxLookupSearchDepth = 6;
bool BasicAAResult::invalidate(Function &Fn, const PreservedAnalyses &PA,
                               FunctionAnalysisManager::Invalidator &Inv) {
  // We don't care if this analysis itself is preserved, it has no state. But
  // we need to check that the analyses it depends on have been. Note that we
  // may be created without handles to some analyses and in that case don't
  // depend on them.
  if (Inv.invalidate<AssumptionAnalysis>(Fn, PA) ||
      (DT && Inv.invalidate<DominatorTreeAnalysis>(Fn, PA)))
    return true;

  // Otherwise this analysis result remains valid.
  return false;
}
//===----------------------------------------------------------------------===//
// Useful predicates
//===----------------------------------------------------------------------===//
/// Returns the size of the object specified by V or std::nullopt if unknown.
static std::optional<TypeSize> getObjectSize(const Value *V,
                                             const DataLayout &DL,
                                             const TargetLibraryInfo &TLI,
                                             bool NullIsValidLoc,
                                             bool RoundToAlign = false) {
  uint64_t Size;
  ObjectSizeOpts Opts;
  Opts.RoundToAlign = RoundToAlign;
  Opts.NullIsUnknownSize = NullIsValidLoc;
  if (getObjectSize(V, Size, DL, &TLI, Opts))
    return TypeSize::Fixed(Size);
  return std::nullopt;
}
/// Returns true if we can prove that the object specified by V is smaller than
/// Size.
static bool isObjectSmallerThan(const Value *V, TypeSize Size,
                                const DataLayout &DL,
                                const TargetLibraryInfo &TLI,
                                bool NullIsValidLoc) {
  // Note that the meanings of the "object" are slightly different in the
  // following contexts:
  //   c1: llvm::getObjectSize()
  //   c2: llvm.objectsize() intrinsic
  //   c3: isObjectSmallerThan()
  // c1 and c2 share the same meaning; however, the meaning of "object" in c3
  // refers to the "entire object".
  //
  // Consider this example:
  //   char *p = (char*)malloc(100);
  //   char *q = p + 80;
  //
  // In the context of c1 and c2, the "object" pointed to by q refers to the
  // stretch of memory of q[0:19]. So, getObjectSize(q) should return 20.
  //
  // However, in the context of c3, the "object" refers to the chunk of memory
  // being allocated. So, the "object" has 100 bytes, and q points into the
  // middle of the "object". In case q is passed to isObjectSmallerThan() as
  // the 1st parameter, before llvm::getObjectSize() is called to get the size
  // of the entire object, we should:
  //   - either rewind the pointer q to the base address of the object in
  //     question (in this case rewind to p), or
  //   - just give up. It is up to the caller to make sure the pointer is
  //     pointing to the base address of the object.
  //
  // We go for the 2nd option for simplicity.
  if (!isIdentifiedObject(V))
    return false;

  // This function needs to use the aligned object size because we allow
  // reads a bit past the end given sufficient alignment.
  std::optional<TypeSize> ObjectSize = getObjectSize(V, DL, TLI, NullIsValidLoc,
                                                     /*RoundToAlign*/ true);

  return ObjectSize && TypeSize::isKnownLT(*ObjectSize, Size);
}
/// Return the minimal extent from \p V to the end of the underlying object,
/// assuming the result is used in an aliasing query. E.g., we do use the query
/// location size and the fact that null pointers cannot alias here.
static TypeSize getMinimalExtentFrom(const Value &V,
                                     const LocationSize &LocSize,
                                     const DataLayout &DL,
                                     bool NullIsValidLoc) {
  // If we have dereferenceability information we know a lower bound for the
  // extent as accesses for a lower offset would be valid. We need to exclude
  // the "or null" part if null is a valid pointer. We can ignore frees, as an
  // access after free would be undefined behavior.
  bool CanBeNull, CanBeFreed;
  uint64_t DerefBytes =
      V.getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed);
  DerefBytes = (CanBeNull && NullIsValidLoc) ? 0 : DerefBytes;
  // If queried with a precise location size, we assume that location size to
  // be accessed, thus valid.
  if (LocSize.isPrecise())
    DerefBytes = std::max(DerefBytes, LocSize.getValue().getKnownMinValue());
  return TypeSize::Fixed(DerefBytes);
}
/// Returns true if we can prove that the object specified by V has size Size.
static bool isObjectSize(const Value *V, TypeSize Size, const DataLayout &DL,
                         const TargetLibraryInfo &TLI, bool NullIsValidLoc) {
  std::optional<TypeSize> ObjectSize =
      getObjectSize(V, DL, TLI, NullIsValidLoc);
  return ObjectSize && *ObjectSize == Size;
}
//===----------------------------------------------------------------------===//
// CaptureInfo implementations
//===----------------------------------------------------------------------===//

CaptureInfo::~CaptureInfo() = default;
bool SimpleCaptureInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                const Instruction *I) {
  return isNonEscapingLocalObject(Object, &IsCapturedCache);
}

bool EarliestEscapeInfo::isNotCapturedBeforeOrAt(const Value *Object,
                                                 const Instruction *I) {
  if (!isIdentifiedFunctionLocal(Object))
    return false;

  auto Iter = EarliestEscapes.insert({Object, nullptr});
  if (Iter.second) {
    Instruction *EarliestCapture = FindEarliestCapture(
        Object, *const_cast<Function *>(I->getFunction()),
        /*ReturnCaptures=*/false, /*StoreCaptures=*/true, DT, EphValues);
    if (EarliestCapture) {
      auto Ins = Inst2Obj.insert({EarliestCapture, {}});
      Ins.first->second.push_back(Object);
    }
    Iter.first->second = EarliestCapture;
  }

  // No capturing instruction.
  if (!Iter.first->second)
    return true;

  return I != Iter.first->second &&
         !isPotentiallyReachable(Iter.first->second, I, nullptr, &DT, LI);
}

void EarliestEscapeInfo::removeInstruction(Instruction *I) {
  auto Iter = Inst2Obj.find(I);
  if (Iter != Inst2Obj.end()) {
    for (const Value *Obj : Iter->second)
      EarliestEscapes.erase(Obj);
    Inst2Obj.erase(I);
  }
}
//===----------------------------------------------------------------------===//
// GetElementPtr Instruction Decomposition and Analysis
//===----------------------------------------------------------------------===//
/// Represents zext(sext(trunc(V))).
struct CastedValue {
  const Value *V;
  unsigned ZExtBits = 0;
  unsigned SExtBits = 0;
  unsigned TruncBits = 0;

  explicit CastedValue(const Value *V) : V(V) {}
  explicit CastedValue(const Value *V, unsigned ZExtBits, unsigned SExtBits,
                       unsigned TruncBits)
      : V(V), ZExtBits(ZExtBits), SExtBits(SExtBits), TruncBits(TruncBits) {}

  unsigned getBitWidth() const {
    return V->getType()->getPrimitiveSizeInBits() - TruncBits + ZExtBits +
           SExtBits;
  }

  CastedValue withValue(const Value *NewV) const {
    return CastedValue(NewV, ZExtBits, SExtBits, TruncBits);
  }

  /// Replace V with zext(NewV)
  CastedValue withZExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(zext(NewV))) == zext(zext(zext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits + SExtBits + ExtendBy, 0, 0);
  }

  /// Replace V with sext(NewV)
  CastedValue withSExtOfValue(const Value *NewV) const {
    unsigned ExtendBy = V->getType()->getPrimitiveSizeInBits() -
                        NewV->getType()->getPrimitiveSizeInBits();
    if (ExtendBy <= TruncBits)
      return CastedValue(NewV, ZExtBits, SExtBits, TruncBits - ExtendBy);

    // zext(sext(sext(NewV)))
    ExtendBy -= TruncBits;
    return CastedValue(NewV, ZExtBits, SExtBits + ExtendBy, 0);
  }

  APInt evaluateWith(APInt N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.trunc(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.sext(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zext(N.getBitWidth() + ZExtBits);
    return N;
  }

  ConstantRange evaluateWith(ConstantRange N) const {
    assert(N.getBitWidth() == V->getType()->getPrimitiveSizeInBits() &&
           "Incompatible bit width");
    if (TruncBits) N = N.truncate(N.getBitWidth() - TruncBits);
    if (SExtBits) N = N.signExtend(N.getBitWidth() + SExtBits);
    if (ZExtBits) N = N.zeroExtend(N.getBitWidth() + ZExtBits);
    return N;
  }

  bool canDistributeOver(bool NUW, bool NSW) const {
    // zext(x op<nuw> y) == zext(x) op<nuw> zext(y)
    // sext(x op<nsw> y) == sext(x) op<nsw> sext(y)
    // trunc(x op y) == trunc(x) op trunc(y)
    return (!ZExtBits || NUW) && (!SExtBits || NSW);
  }

  bool hasSameCastsAs(const CastedValue &Other) const {
    return ZExtBits == Other.ZExtBits && SExtBits == Other.SExtBits &&
           TruncBits == Other.TruncBits;
  }
};
/// Represents zext(sext(trunc(V))) * Scale + Offset.
struct LinearExpression {
  CastedValue Val;
  APInt Scale;
  APInt Offset;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  LinearExpression(const CastedValue &Val, const APInt &Scale,
                   const APInt &Offset, bool IsNSW)
      : Val(Val), Scale(Scale), Offset(Offset), IsNSW(IsNSW) {}

  LinearExpression(const CastedValue &Val) : Val(Val), IsNSW(true) {
    unsigned BitWidth = Val.getBitWidth();
    Scale = APInt(BitWidth, 1);
    Offset = APInt(BitWidth, 0);
  }

  LinearExpression mul(const APInt &Other, bool MulIsNSW) const {
    // The check for zero offset is necessary, because generally
    // (X +nsw Y) *nsw Z does not imply (X *nsw Z) +nsw (Y *nsw Z).
    bool NSW = IsNSW && (Other.isOne() || (MulIsNSW && Offset.isZero()));
    return LinearExpression(Val, Scale * Other, Offset * Other, NSW);
  }
};
/// Analyzes the specified value as a linear expression: "A*V + B", where A and
/// B are constant integers.
static LinearExpression GetLinearExpression(
    const CastedValue &Val, const DataLayout &DL, unsigned Depth,
    AssumptionCache *AC, DominatorTree *DT) {
  // Limit our recursion depth.
  if (Depth == 6)
    return Val;

  if (const ConstantInt *Const = dyn_cast<ConstantInt>(Val.V))
    return LinearExpression(Val, APInt(Val.getBitWidth(), 0),
                            Val.evaluateWith(Const->getValue()), true);

  if (const BinaryOperator *BOp = dyn_cast<BinaryOperator>(Val.V)) {
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(BOp->getOperand(1))) {
      APInt RHS = Val.evaluateWith(RHSC->getValue());
      // The only non-OBO case we deal with is or, and only limited to the
      // case where it is both nuw and nsw.
      bool NUW = true, NSW = true;
      if (isa<OverflowingBinaryOperator>(BOp)) {
        NUW &= BOp->hasNoUnsignedWrap();
        NSW &= BOp->hasNoSignedWrap();
      }
      if (!Val.canDistributeOver(NUW, NSW))
        return Val;

      // While we can distribute over trunc, we cannot preserve nowrap flags
      // in that case.
      if (Val.TruncBits)
        NUW = NSW = false;

      LinearExpression E(Val);
      switch (BOp->getOpcode()) {
      default:
        // We don't understand this instruction, so we can't decompose it any
        // further.
        return Val;
      case Instruction::Or:
        // X|C == X+C if all the bits in C are unset in X. Otherwise we can't
        // analyze it.
        if (!MaskedValueIsZero(BOp->getOperand(0), RHSC->getValue(), DL, 0, AC,
                               BOp, DT))
          return Val;

        [[fallthrough]];
      case Instruction::Add: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset += RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Sub: {
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset -= RHS;
        E.IsNSW &= NSW;
        break;
      }
      case Instruction::Mul:
        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT)
                .mul(RHS, NSW);
        break;
      case Instruction::Shl:
        // We're trying to linearize an expression of the kind:
        //   shl i8 -128, 36
        // where the shift count exceeds the bitwidth of the type.
        // We can't decompose this further (the expression would return
        // a poison value).
        if (RHS.getLimitedValue() > Val.getBitWidth())
          return Val;

        E = GetLinearExpression(Val.withValue(BOp->getOperand(0)), DL,
                                Depth + 1, AC, DT);
        E.Offset <<= RHS.getLimitedValue();
        E.Scale <<= RHS.getLimitedValue();
        E.IsNSW &= NSW;
        break;
      }
      return E;
    }
  }

  if (isa<ZExtInst>(Val.V))
    return GetLinearExpression(
        Val.withZExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  if (isa<SExtInst>(Val.V))
    return GetLinearExpression(
        Val.withSExtOfValue(cast<CastInst>(Val.V)->getOperand(0)),
        DL, Depth + 1, AC, DT);

  return Val;
}
/// Ensure that a pointer offset fits in an integer of size IndexSize
/// (in bits) when that size is smaller than the maximum index size. This is
/// an issue in particular for 32b pointers with negative indices that rely
/// on two's complement wrap-arounds for precise alias information where the
/// maximum index size is 64b.
static APInt adjustToIndexSize(const APInt &Offset, unsigned IndexSize) {
  assert(IndexSize <= Offset.getBitWidth() && "Invalid IndexSize!");
  unsigned ShiftBits = Offset.getBitWidth() - IndexSize;
  return (Offset << ShiftBits).ashr(ShiftBits);
}
// A linear transformation of a Value; this class represents
// ZExt(SExt(Trunc(V, TruncBits), SExtBits), ZExtBits) * Scale.
struct VariableGEPIndex {
  CastedValue Val;
  APInt Scale;

  // Context instruction to use when querying information about this index.
  const Instruction *CxtI;

  /// True if all operations in this expression are NSW.
  bool IsNSW;

  /// True if the index should be subtracted rather than added. We don't simply
  /// negate the Scale, to avoid losing the NSW flag: X - INT_MIN*1 may be
  /// non-wrapping, while X + INT_MIN*(-1) wraps.
  bool IsNegated;

  bool hasNegatedScaleOf(const VariableGEPIndex &Other) const {
    if (IsNegated == Other.IsNegated)
      return Scale == -Other.Scale;
    return Scale == Other.Scale;
  }

  void print(raw_ostream &OS) const {
    OS << "(V=" << Val.V->getName()
       << ", zextbits=" << Val.ZExtBits
       << ", sextbits=" << Val.SExtBits
       << ", truncbits=" << Val.TruncBits
       << ", scale=" << Scale
       << ", nsw=" << IsNSW
       << ", negated=" << IsNegated << ")";
  }
};
// Represents the internal structure of a GEP, decomposed into a base pointer,
// constant offsets, and variable scaled indices.
struct BasicAAResult::DecomposedGEP {
  // Base pointer of the GEP
  const Value *Base;
  // Total constant offset from base.
  APInt Offset;
  // Scaled variable (non-constant) indices.
  SmallVector<VariableGEPIndex, 4> VarIndices;
  // Are all operations inbounds GEPs or non-indexing operations?
  // (std::nullopt iff expression doesn't involve any geps)
  std::optional<bool> InBounds;

  void print(raw_ostream &OS) const {
    OS << "(DecomposedGEP Base=" << Base->getName()
       << ", Offset=" << Offset
       << ", VarIndices=[";
    for (size_t i = 0; i < VarIndices.size(); i++) {
      if (i != 0)
        OS << ", ";
      VarIndices[i].print(OS);
    }
    OS << "])";
  }
};
/// If V is a symbolic pointer expression, decompose it into a base pointer
/// with a constant offset and a number of scaled symbolic offsets.
///
/// The scaled symbolic offsets (represented by pairs of a Value* and a scale
/// in the VarIndices vector) are Value*'s that are known to be scaled by the
/// specified amount, but which may have other unrepresented high bits. As
/// such, the gep cannot necessarily be reconstructed from its decomposed form.
BasicAAResult::DecomposedGEP
BasicAAResult::DecomposeGEPExpression(const Value *V, const DataLayout &DL,
                                      AssumptionCache *AC, DominatorTree *DT) {
  // Limit recursion depth to limit compile time in crazy cases.
  unsigned MaxLookup = MaxLookupSearchDepth;
  SearchTimes++;
  const Instruction *CxtI = dyn_cast<Instruction>(V);

  unsigned MaxIndexSize = DL.getMaxIndexSizeInBits();
  DecomposedGEP Decomposed;
  Decomposed.Offset = APInt(MaxIndexSize, 0);
  do {
543 // See if this is a bitcast or GEP.
544 const Operator
*Op
= dyn_cast
<Operator
>(V
);
546 // The only non-operator case we can handle are GlobalAliases.
547 if (const GlobalAlias
*GA
= dyn_cast
<GlobalAlias
>(V
)) {
548 if (!GA
->isInterposable()) {
549 V
= GA
->getAliasee();
557 if (Op
->getOpcode() == Instruction::BitCast
||
558 Op
->getOpcode() == Instruction::AddrSpaceCast
) {
559 V
= Op
->getOperand(0);
563 const GEPOperator
*GEPOp
= dyn_cast
<GEPOperator
>(Op
);
565 if (const auto *PHI
= dyn_cast
<PHINode
>(V
)) {
566 // Look through single-arg phi nodes created by LCSSA.
567 if (PHI
->getNumIncomingValues() == 1) {
568 V
= PHI
->getIncomingValue(0);
571 } else if (const auto *Call
= dyn_cast
<CallBase
>(V
)) {
572 // CaptureTracking can know about special capturing properties of some
573 // intrinsics like launder.invariant.group, that can't be expressed with
574 // the attributes, but have properties like returning aliasing pointer.
575 // Because some analysis may assume that nocaptured pointer is not
576 // returned from some special intrinsic (because function would have to
577 // be marked with returns attribute), it is crucial to use this function
578 // because it should be in sync with CaptureTracking. Not using it may
579 // cause weird miscompilations where 2 aliasing pointers are assumed to
581 if (auto *RP
= getArgumentAliasingToReturnedPointer(Call
, false)) {
591 // Track whether we've seen at least one in bounds gep, and if so, whether
592 // all geps parsed were in bounds.
593 if (Decomposed
.InBounds
== std::nullopt
)
594 Decomposed
.InBounds
= GEPOp
->isInBounds();
595 else if (!GEPOp
->isInBounds())
596 Decomposed
.InBounds
= false;
598 assert(GEPOp
->getSourceElementType()->isSized() && "GEP must be sized");
600 unsigned AS
= GEPOp
->getPointerAddressSpace();
601 // Walk the indices of the GEP, accumulating them into BaseOff/VarIndices.
602 gep_type_iterator GTI
= gep_type_begin(GEPOp
);
603 unsigned IndexSize
= DL
.getIndexSizeInBits(AS
);
604 // Assume all GEP operands are constants until proven otherwise.
605 bool GepHasConstantOffset
= true;
606 for (User::const_op_iterator I
= GEPOp
->op_begin() + 1, E
= GEPOp
->op_end();
607 I
!= E
; ++I
, ++GTI
) {
608 const Value
*Index
= *I
;
609 // Compute the (potentially symbolic) offset in bytes for this index.
610 if (StructType
*STy
= GTI
.getStructTypeOrNull()) {
611 // For a struct, add the member offset.
612 unsigned FieldNo
= cast
<ConstantInt
>(Index
)->getZExtValue();
616 Decomposed
.Offset
+= DL
.getStructLayout(STy
)->getElementOffset(FieldNo
);
620 // For an array/pointer, add the element offset, explicitly scaled.
621 if (const ConstantInt
*CIdx
= dyn_cast
<ConstantInt
>(Index
)) {
625 // Don't attempt to analyze GEPs if the scalable index is not zero.
626 TypeSize AllocTypeSize
= DL
.getTypeAllocSize(GTI
.getIndexedType());
627 if (AllocTypeSize
.isScalable()) {
632 Decomposed
.Offset
+= AllocTypeSize
.getFixedValue() *
633 CIdx
->getValue().sextOrTrunc(MaxIndexSize
);
637 TypeSize AllocTypeSize
= DL
.getTypeAllocSize(GTI
.getIndexedType());
638 if (AllocTypeSize
.isScalable()) {
643 GepHasConstantOffset
= false;
645 // If the integer type is smaller than the index size, it is implicitly
646 // sign extended or truncated to index size.
647 unsigned Width
= Index
->getType()->getIntegerBitWidth();
648 unsigned SExtBits
= IndexSize
> Width
? IndexSize
- Width
: 0;
649 unsigned TruncBits
= IndexSize
< Width
? Width
- IndexSize
: 0;
650 LinearExpression LE
= GetLinearExpression(
651 CastedValue(Index
, 0, SExtBits
, TruncBits
), DL
, 0, AC
, DT
);
653 // Scale by the type size.
654 unsigned TypeSize
= AllocTypeSize
.getFixedValue();
655 LE
= LE
.mul(APInt(IndexSize
, TypeSize
), GEPOp
->isInBounds());
656 Decomposed
.Offset
+= LE
.Offset
.sext(MaxIndexSize
);
657 APInt Scale
= LE
.Scale
.sext(MaxIndexSize
);
659 // If we already had an occurrence of this index variable, merge this
660 // scale into it. For example, we want to handle:
661 // A[x][x] -> x*16 + x*4 -> x*20
662 // This also ensures that 'x' only appears in the index list once.
663 for (unsigned i
= 0, e
= Decomposed
.VarIndices
.size(); i
!= e
; ++i
) {
664 if (Decomposed
.VarIndices
[i
].Val
.V
== LE
.Val
.V
&&
665 Decomposed
.VarIndices
[i
].Val
.hasSameCastsAs(LE
.Val
)) {
666 Scale
+= Decomposed
.VarIndices
[i
].Scale
;
667 LE
.IsNSW
= false; // We cannot guarantee nsw for the merge.
668 Decomposed
.VarIndices
.erase(Decomposed
.VarIndices
.begin() + i
);
673 // Make sure that we have a scale that makes sense for this target's
675 Scale
= adjustToIndexSize(Scale
, IndexSize
);
678 VariableGEPIndex Entry
= {LE
.Val
, Scale
, CxtI
, LE
.IsNSW
,
679 /* IsNegated */ false};
680 Decomposed
.VarIndices
.push_back(Entry
);
684 // Take care of wrap-arounds
685 if (GepHasConstantOffset
)
686 Decomposed
.Offset
= adjustToIndexSize(Decomposed
.Offset
, IndexSize
);
688 // Analyze the base pointer next.
689 V
= GEPOp
->getOperand(0);
690 } while (--MaxLookup
);
692 // If the chain of expressions is too deep, just return early.
694 SearchLimitReached
++;
698 ModRefInfo
BasicAAResult::getModRefInfoMask(const MemoryLocation
&Loc
,
701 assert(Visited
.empty() && "Visited must be cleared after use!");
702 auto _
= make_scope_exit([&] { Visited
.clear(); });
704 unsigned MaxLookup
= 8;
705 SmallVector
<const Value
*, 16> Worklist
;
706 Worklist
.push_back(Loc
.Ptr
);
707 ModRefInfo Result
= ModRefInfo::NoModRef
;
710 const Value
*V
= getUnderlyingObject(Worklist
.pop_back_val());
711 if (!Visited
.insert(V
).second
)
714 // Ignore allocas if we were instructed to do so.
715 if (IgnoreLocals
&& isa
<AllocaInst
>(V
))
718 // If the location points to memory that is known to be invariant for
719 // the life of the underlying SSA value, then we can exclude Mod from
720 // the set of valid memory effects.
722 // An argument that is marked readonly and noalias is known to be
723 // invariant while that function is executing.
724 if (const Argument
*Arg
= dyn_cast
<Argument
>(V
)) {
725 if (Arg
->hasNoAliasAttr() && Arg
->onlyReadsMemory()) {
726 Result
|= ModRefInfo::Ref
;
731 // A global constant can't be mutated.
732 if (const GlobalVariable
*GV
= dyn_cast
<GlobalVariable
>(V
)) {
733 // Note: this doesn't require GV to be "ODR" because it isn't legal for a
734 // global to be marked constant in some modules and non-constant in
735 // others. GV may even be a declaration, not a definition.
736 if (!GV
->isConstant())
737 return ModRefInfo::ModRef
;
741 // If both select values point to local memory, then so does the select.
742 if (const SelectInst
*SI
= dyn_cast
<SelectInst
>(V
)) {
743 Worklist
.push_back(SI
->getTrueValue());
744 Worklist
.push_back(SI
->getFalseValue());
748 // If all values incoming to a phi node point to local memory, then so does
750 if (const PHINode
*PN
= dyn_cast
<PHINode
>(V
)) {
751 // Don't bother inspecting phi nodes with many operands.
752 if (PN
->getNumIncomingValues() > MaxLookup
)
753 return ModRefInfo::ModRef
;
754 append_range(Worklist
, PN
->incoming_values());
758 // Otherwise be conservative.
759 return ModRefInfo::ModRef
;
760 } while (!Worklist
.empty() && --MaxLookup
);
762 // If we hit the maximum number of instructions to examine, be conservative.
763 if (!Worklist
.empty())
764 return ModRefInfo::ModRef
;
static bool isIntrinsicCall(const CallBase *Call, Intrinsic::ID IID) {
  const IntrinsicInst *II = dyn_cast<IntrinsicInst>(Call);
  return II && II->getIntrinsicID() == IID;
}
/// Returns the behavior when calling the given call site.
MemoryEffects BasicAAResult::getMemoryEffects(const CallBase *Call,
                                              AAQueryInfo &AAQI) {
  MemoryEffects Min = Call->getAttributes().getMemoryEffects();

  if (const Function *F = dyn_cast<Function>(Call->getCalledOperand())) {
    MemoryEffects FuncME = AAQI.AAR.getMemoryEffects(F);
    // Operand bundles on the call may also read or write memory, in addition
    // to the behavior of the called function.
    if (Call->hasReadingOperandBundles())
      FuncME |= MemoryEffects::readOnly();
    if (Call->hasClobberingOperandBundles())
      FuncME |= MemoryEffects::writeOnly();
    Min &= FuncME;
  }

  return Min;
}
/// Returns the behavior when calling the given function. For use when the call
/// site is not known.
MemoryEffects BasicAAResult::getMemoryEffects(const Function *F) {
  switch (F->getIntrinsicID()) {
  case Intrinsic::experimental_guard:
  case Intrinsic::experimental_deoptimize:
    // These intrinsics can read arbitrary memory, and additionally modref
    // inaccessible memory to model control dependence.
    return MemoryEffects::readOnly() |
           MemoryEffects::inaccessibleMemOnly(ModRefInfo::ModRef);
  default:
    break;
  }

  return F->getMemoryEffects();
}
ModRefInfo BasicAAResult::getArgModRefInfo(const CallBase *Call,
                                           unsigned ArgIdx) {
  if (Call->paramHasAttr(ArgIdx, Attribute::WriteOnly))
    return ModRefInfo::Mod;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadOnly))
    return ModRefInfo::Ref;

  if (Call->paramHasAttr(ArgIdx, Attribute::ReadNone))
    return ModRefInfo::NoModRef;

  return ModRefInfo::ModRef;
}
static const Function *getParent(const Value *V) {
  if (const Instruction *inst = dyn_cast<Instruction>(V)) {
    if (!inst->getParent())
      return nullptr;
    return inst->getParent()->getParent();
  }

  if (const Argument *arg = dyn_cast<Argument>(V))
    return arg->getParent();

  return nullptr;
}

static bool notDifferentParent(const Value *O1, const Value *O2) {
  const Function *F1 = getParent(O1);
  const Function *F2 = getParent(O2);

  return !F1 || !F2 || F1 == F2;
}
AliasResult BasicAAResult::alias(const MemoryLocation &LocA,
                                 const MemoryLocation &LocB, AAQueryInfo &AAQI,
                                 const Instruction *CtxI) {
  assert(notDifferentParent(LocA.Ptr, LocB.Ptr) &&
         "BasicAliasAnalysis doesn't support interprocedural queries.");
  return aliasCheck(LocA.Ptr, LocA.Size, LocB.Ptr, LocB.Size, AAQI, CtxI);
}
853 /// Checks to see if the specified callsite can clobber the specified memory
856 /// Since we only look at local properties of this function, we really can't
857 /// say much about this query. We do, however, use simple "address taken"
858 /// analysis on local objects.
859 ModRefInfo
BasicAAResult::getModRefInfo(const CallBase
*Call
,
860 const MemoryLocation
&Loc
,
862 assert(notDifferentParent(Call
, Loc
.Ptr
) &&
863 "AliasAnalysis query involving multiple functions!");
865 const Value
*Object
= getUnderlyingObject(Loc
.Ptr
);
867 // Calls marked 'tail' cannot read or write allocas from the current frame
868 // because the current frame might be destroyed by the time they run. However,
869 // a tail call may use an alloca with byval. Calling with byval copies the
870 // contents of the alloca into argument registers or stack slots, so there is
871 // no lifetime issue.
872 if (isa
<AllocaInst
>(Object
))
873 if (const CallInst
*CI
= dyn_cast
<CallInst
>(Call
))
874 if (CI
->isTailCall() &&
875 !CI
->getAttributes().hasAttrSomewhere(Attribute::ByVal
))
876 return ModRefInfo::NoModRef
;
878 // Stack restore is able to modify unescaped dynamic allocas. Assume it may
879 // modify them even though the alloca is not escaped.
880 if (auto *AI
= dyn_cast
<AllocaInst
>(Object
))
881 if (!AI
->isStaticAlloca() && isIntrinsicCall(Call
, Intrinsic::stackrestore
))
882 return ModRefInfo::Mod
;
884 // A call can access a locally allocated object either because it is passed as
885 // an argument to the call, or because it has escaped prior to the call.
887 // Make sure the object has not escaped here, and then check that none of the
888 // call arguments alias the object below.
889 if (!isa
<Constant
>(Object
) && Call
!= Object
&&
890 AAQI
.CI
->isNotCapturedBeforeOrAt(Object
, Call
)) {
892 // Optimistically assume that call doesn't touch Object and check this
893 // assumption in the following loop.
894 ModRefInfo Result
= ModRefInfo::NoModRef
;
896 unsigned OperandNo
= 0;
897 for (auto CI
= Call
->data_operands_begin(), CE
= Call
->data_operands_end();
898 CI
!= CE
; ++CI
, ++OperandNo
) {
899 if (!(*CI
)->getType()->isPointerTy())
902 // Call doesn't access memory through this operand, so we don't care
903 // if it aliases with Object.
904 if (Call
->doesNotAccessMemory(OperandNo
))
907 // If this is a no-capture pointer argument, see if we can tell that it
908 // is impossible to alias the pointer we're checking.
910 AAQI
.AAR
.alias(MemoryLocation::getBeforeOrAfter(*CI
),
911 MemoryLocation::getBeforeOrAfter(Object
), AAQI
);
912 // Operand doesn't alias 'Object', continue looking for other aliases
913 if (AR
== AliasResult::NoAlias
)
915 // Operand aliases 'Object', but call doesn't modify it. Strengthen
916 // initial assumption and keep looking in case if there are more aliases.
917 if (Call
->onlyReadsMemory(OperandNo
)) {
918 Result
|= ModRefInfo::Ref
;
921 // Operand aliases 'Object' but call only writes into it.
922 if (Call
->onlyWritesMemory(OperandNo
)) {
923 Result
|= ModRefInfo::Mod
;
926 // This operand aliases 'Object' and call reads and writes into it.
927 // Setting ModRef will not yield an early return below, MustAlias is not
929 Result
= ModRefInfo::ModRef
;
933 // Early return if we improved mod ref information
934 if (!isModAndRefSet(Result
))
938 // If the call is malloc/calloc like, we can assume that it doesn't
939 // modify any IR visible value. This is only valid because we assume these
940 // routines do not read values visible in the IR. TODO: Consider special
941 // casing realloc and strdup routines which access only their arguments as
942 // well. Or alternatively, replace all of this with inaccessiblememonly once
943 // that's implemented fully.
944 if (isMallocOrCallocLikeFn(Call
, &TLI
)) {
945 // Be conservative if the accessed pointer may alias the allocation -
946 // fallback to the generic handling below.
947 if (AAQI
.AAR
.alias(MemoryLocation::getBeforeOrAfter(Call
), Loc
, AAQI
) ==
948 AliasResult::NoAlias
)
949 return ModRefInfo::NoModRef
;
952 // Like assumes, invariant.start intrinsics were also marked as arbitrarily
953 // writing so that proper control dependencies are maintained but they never
954 // mod any particular memory location visible to the IR.
955 // *Unlike* assumes (which are now modeled as NoModRef), invariant.start
956 // intrinsic is now modeled as reading memory. This prevents hoisting the
957 // invariant.start intrinsic over stores. Consider:
960 // invariant_start(ptr)
964 // This cannot be transformed to:
967 // invariant_start(ptr)
972 // The transformation will cause the second store to be ignored (based on
973 // rules of invariant.start) and print 40, while the first program always
975 if (isIntrinsicCall(Call
, Intrinsic::invariant_start
))
976 return ModRefInfo::Ref
;
979 return ModRefInfo::ModRef
;
982 ModRefInfo
BasicAAResult::getModRefInfo(const CallBase
*Call1
,
983 const CallBase
*Call2
,
985 // Guard intrinsics are marked as arbitrarily writing so that proper control
986 // dependencies are maintained but they never mods any particular memory
989 // *Unlike* assumes, guard intrinsics are modeled as reading memory since the
990 // heap state at the point the guard is issued needs to be consistent in case
991 // the guard invokes the "deopt" continuation.
993 // NB! This function is *not* commutative, so we special case two
994 // possibilities for guard intrinsics.
996 if (isIntrinsicCall(Call1
, Intrinsic::experimental_guard
))
997 return isModSet(getMemoryEffects(Call2
, AAQI
).getModRef())
999 : ModRefInfo::NoModRef
;
1001 if (isIntrinsicCall(Call2
, Intrinsic::experimental_guard
))
1002 return isModSet(getMemoryEffects(Call1
, AAQI
).getModRef())
1004 : ModRefInfo::NoModRef
;
1007 return ModRefInfo::ModRef
;
/// Return true if we know V to be the base address of the corresponding memory
/// object. This implies that any address less than V must be out of bounds
/// for the underlying object. Note that just being isIdentifiedObject() is
/// not enough - For example, a negative offset from a noalias argument or call
/// can be inbounds w.r.t the actual underlying object.
static bool isBaseOfObject(const Value *V) {
  // TODO: We can handle other cases here
  // 1) For GC languages, arguments to functions are often required to be
  //    base pointers.
  // 2) Result of allocation routines are often base pointers. Leverage TLI.
  return (isa<AllocaInst>(V) || isa<GlobalVariable>(V));
}
1023 /// Provides a bunch of ad-hoc rules to disambiguate a GEP instruction against
1024 /// another pointer.
1026 /// We know that V1 is a GEP, but we don't know anything about V2.
1027 /// UnderlyingV1 is getUnderlyingObject(GEP1), UnderlyingV2 is the same for
1029 AliasResult
BasicAAResult::aliasGEP(
1030 const GEPOperator
*GEP1
, LocationSize V1Size
,
1031 const Value
*V2
, LocationSize V2Size
,
1032 const Value
*UnderlyingV1
, const Value
*UnderlyingV2
, AAQueryInfo
&AAQI
) {
1033 if (!V1Size
.hasValue() && !V2Size
.hasValue()) {
1034 // TODO: This limitation exists for compile-time reasons. Relax it if we
1035 // can avoid exponential pathological cases.
1036 if (!isa
<GEPOperator
>(V2
))
1037 return AliasResult::MayAlias
;
1039 // If both accesses have unknown size, we can only check whether the base
1040 // objects don't alias.
1041 AliasResult BaseAlias
=
1042 AAQI
.AAR
.alias(MemoryLocation::getBeforeOrAfter(UnderlyingV1
),
1043 MemoryLocation::getBeforeOrAfter(UnderlyingV2
), AAQI
);
1044 return BaseAlias
== AliasResult::NoAlias
? AliasResult::NoAlias
1045 : AliasResult::MayAlias
;
1048 DecomposedGEP DecompGEP1
= DecomposeGEPExpression(GEP1
, DL
, &AC
, DT
);
1049 DecomposedGEP DecompGEP2
= DecomposeGEPExpression(V2
, DL
, &AC
, DT
);
1051 // Bail if we were not able to decompose anything.
1052 if (DecompGEP1
.Base
== GEP1
&& DecompGEP2
.Base
== V2
)
1053 return AliasResult::MayAlias
;
1055 // Subtract the GEP2 pointer from the GEP1 pointer to find out their
1056 // symbolic difference.
1057 subtractDecomposedGEPs(DecompGEP1
, DecompGEP2
, AAQI
);
1059 // If an inbounds GEP would have to start from an out of bounds address
1060 // for the two to alias, then we can assume noalias.
1061 // TODO: Remove !isScalable() once BasicAA fully support scalable location
1063 if (*DecompGEP1
.InBounds
&& DecompGEP1
.VarIndices
.empty() &&
1064 V2Size
.hasValue() && !V2Size
.isScalable() &&
1065 DecompGEP1
.Offset
.sge(V2Size
.getValue()) &&
1066 isBaseOfObject(DecompGEP2
.Base
))
1067 return AliasResult::NoAlias
;
1069 if (isa
<GEPOperator
>(V2
)) {
1070 // Symmetric case to above.
1071 if (*DecompGEP2
.InBounds
&& DecompGEP1
.VarIndices
.empty() &&
1072 V1Size
.hasValue() && !V1Size
.isScalable() &&
1073 DecompGEP1
.Offset
.sle(-V1Size
.getValue()) &&
1074 isBaseOfObject(DecompGEP1
.Base
))
1075 return AliasResult::NoAlias
;
1078 // For GEPs with identical offsets, we can preserve the size and AAInfo
1079 // when performing the alias check on the underlying objects.
1080 if (DecompGEP1
.Offset
== 0 && DecompGEP1
.VarIndices
.empty())
1081 return AAQI
.AAR
.alias(MemoryLocation(DecompGEP1
.Base
, V1Size
),
1082 MemoryLocation(DecompGEP2
.Base
, V2Size
), AAQI
);
1084 // Do the base pointers alias?
1085 AliasResult BaseAlias
=
1086 AAQI
.AAR
.alias(MemoryLocation::getBeforeOrAfter(DecompGEP1
.Base
),
1087 MemoryLocation::getBeforeOrAfter(DecompGEP2
.Base
), AAQI
);
1089 // If we get a No or May, then return it immediately, no amount of analysis
1090 // will improve this situation.
1091 if (BaseAlias
!= AliasResult::MustAlias
) {
1092 assert(BaseAlias
== AliasResult::NoAlias
||
1093 BaseAlias
== AliasResult::MayAlias
);
1097 // Bail on analysing scalable LocationSize
1098 if (V1Size
.isScalable() || V2Size
.isScalable())
1099 return AliasResult::MayAlias
;
1101 // If there is a constant difference between the pointers, but the difference
1102 // is less than the size of the associated memory object, then we know
1103 // that the objects are partially overlapping. If the difference is
1104 // greater, we know they do not overlap.
1105 if (DecompGEP1
.VarIndices
.empty()) {
1106 APInt
&Off
= DecompGEP1
.Offset
;
1108 // Initialize for Off >= 0 (V2 <= GEP1) case.
1109 const Value
*LeftPtr
= V2
;
1110 const Value
*RightPtr
= GEP1
;
1111 LocationSize VLeftSize
= V2Size
;
1112 LocationSize VRightSize
= V1Size
;
1113 const bool Swapped
= Off
.isNegative();
1116 // Swap if we have the situation where:
1119 // ---------------->|
1120 // |-->V1Size |-------> V2Size
1122 std::swap(LeftPtr
, RightPtr
);
1123 std::swap(VLeftSize
, VRightSize
);
1127 if (!VLeftSize
.hasValue())
1128 return AliasResult::MayAlias
;
1130 const uint64_t LSize
= VLeftSize
.getValue();
1131 if (Off
.ult(LSize
)) {
1132 // Conservatively drop processing if a phi was visited and/or offset is
1134 AliasResult AR
= AliasResult::PartialAlias
;
1135 if (VRightSize
.hasValue() && Off
.ule(INT32_MAX
) &&
1136 (Off
+ VRightSize
.getValue()).ule(LSize
)) {
1137 // Memory referenced by right pointer is nested. Save the offset in
1138 // cache. Note that originally offset estimated as GEP1-V2, but
1139 // AliasResult contains the shift that represents GEP1+Offset=V2.
1140 AR
.setOffset(-Off
.getSExtValue());
1145 return AliasResult::NoAlias
;
  // We need to know both access sizes for all the following heuristics.
1149 if (!V1Size
.hasValue() || !V2Size
.hasValue())
1150 return AliasResult::MayAlias
;
1153 ConstantRange OffsetRange
= ConstantRange(DecompGEP1
.Offset
);
1154 for (unsigned i
= 0, e
= DecompGEP1
.VarIndices
.size(); i
!= e
; ++i
) {
1155 const VariableGEPIndex
&Index
= DecompGEP1
.VarIndices
[i
];
1156 const APInt
&Scale
= Index
.Scale
;
1157 APInt ScaleForGCD
= Scale
;
1160 APInt::getOneBitSet(Scale
.getBitWidth(), Scale
.countr_zero());
1163 GCD
= ScaleForGCD
.abs();
1165 GCD
= APIntOps::GreatestCommonDivisor(GCD
, ScaleForGCD
.abs());
1167 ConstantRange CR
= computeConstantRange(Index
.Val
.V
, /* ForSigned */ false,
1168 true, &AC
, Index
.CxtI
);
1170 computeKnownBits(Index
.Val
.V
, DL
, 0, &AC
, Index
.CxtI
, DT
);
1171 CR
= CR
.intersectWith(
1172 ConstantRange::fromKnownBits(Known
, /* Signed */ true),
1173 ConstantRange::Signed
);
1174 CR
= Index
.Val
.evaluateWith(CR
).sextOrTrunc(OffsetRange
.getBitWidth());
1176 assert(OffsetRange
.getBitWidth() == Scale
.getBitWidth() &&
1177 "Bit widths are normalized to MaxIndexSize");
1179 CR
= CR
.smul_sat(ConstantRange(Scale
));
1181 CR
= CR
.smul_fast(ConstantRange(Scale
));
1183 if (Index
.IsNegated
)
1184 OffsetRange
= OffsetRange
.sub(CR
);
1186 OffsetRange
= OffsetRange
.add(CR
);
1189 // We now have accesses at two offsets from the same base:
1190 // 1. (...)*GCD + DecompGEP1.Offset with size V1Size
1191 // 2. 0 with size V2Size
1192 // Using arithmetic modulo GCD, the accesses are at
1193 // [ModOffset..ModOffset+V1Size) and [0..V2Size). If the first access fits
1194 // into the range [V2Size..GCD), then we know they cannot overlap.
1195 APInt ModOffset
= DecompGEP1
.Offset
.srem(GCD
);
1196 if (ModOffset
.isNegative())
1197 ModOffset
+= GCD
; // We want mod, not rem.
1198 if (ModOffset
.uge(V2Size
.getValue()) &&
1199 (GCD
- ModOffset
).uge(V1Size
.getValue()))
1200 return AliasResult::NoAlias
;
  // Compute ranges of potentially accessed bytes for both accesses. If the
  // intersection is empty, there can be no overlap.
1204 unsigned BW
= OffsetRange
.getBitWidth();
1205 ConstantRange Range1
= OffsetRange
.add(
1206 ConstantRange(APInt(BW
, 0), APInt(BW
, V1Size
.getValue())));
1207 ConstantRange Range2
=
1208 ConstantRange(APInt(BW
, 0), APInt(BW
, V2Size
.getValue()));
1209 if (Range1
.intersectWith(Range2
).isEmptySet())
1210 return AliasResult::NoAlias
;
1212 // Try to determine the range of values for VarIndex such that
1213 // VarIndex <= -MinAbsVarIndex || MinAbsVarIndex <= VarIndex.
1214 std::optional
<APInt
> MinAbsVarIndex
;
1215 if (DecompGEP1
.VarIndices
.size() == 1) {
1216 // VarIndex = Scale*V.
1217 const VariableGEPIndex
&Var
= DecompGEP1
.VarIndices
[0];
1218 if (Var
.Val
.TruncBits
== 0 &&
1219 isKnownNonZero(Var
.Val
.V
, DL
, 0, &AC
, Var
.CxtI
, DT
)) {
1220 // If V != 0, then abs(VarIndex) > 0.
1221 MinAbsVarIndex
= APInt(Var
.Scale
.getBitWidth(), 1);
1223 // Check if abs(V*Scale) >= abs(Scale) holds in the presence of
1224 // potentially wrapping math.
1225 auto MultiplyByScaleNoWrap
= [](const VariableGEPIndex
&Var
) {
1229 int ValOrigBW
= Var
.Val
.V
->getType()->getPrimitiveSizeInBits();
1230 // If Scale is small enough so that abs(V*Scale) >= abs(Scale) holds.
1231 // The max value of abs(V) is 2^ValOrigBW - 1. Multiplying with a
1232 // constant smaller than 2^(bitwidth(Val) - ValOrigBW) won't wrap.
1233 int MaxScaleValueBW
= Var
.Val
.getBitWidth() - ValOrigBW
;
1234 if (MaxScaleValueBW
<= 0)
1236 return Var
.Scale
.ule(
1237 APInt::getMaxValue(MaxScaleValueBW
).zext(Var
.Scale
.getBitWidth()));
1239 // Refine MinAbsVarIndex, if abs(Scale*V) >= abs(Scale) holds in the
1240 // presence of potentially wrapping math.
1241 if (MultiplyByScaleNoWrap(Var
)) {
1242 // If V != 0 then abs(VarIndex) >= abs(Scale).
1243 MinAbsVarIndex
= Var
.Scale
.abs();
1246 } else if (DecompGEP1
.VarIndices
.size() == 2) {
1247 // VarIndex = Scale*V0 + (-Scale)*V1.
1248 // If V0 != V1 then abs(VarIndex) >= abs(Scale).
1249 // Check that MayBeCrossIteration is false, to avoid reasoning about
1250 // inequality of values across loop iterations.
1251 const VariableGEPIndex
&Var0
= DecompGEP1
.VarIndices
[0];
1252 const VariableGEPIndex
&Var1
= DecompGEP1
.VarIndices
[1];
1253 if (Var0
.hasNegatedScaleOf(Var1
) && Var0
.Val
.TruncBits
== 0 &&
1254 Var0
.Val
.hasSameCastsAs(Var1
.Val
) && !AAQI
.MayBeCrossIteration
&&
1255 isKnownNonEqual(Var0
.Val
.V
, Var1
.Val
.V
, DL
, &AC
, /* CxtI */ nullptr,
1257 MinAbsVarIndex
= Var0
.Scale
.abs();
1260 if (MinAbsVarIndex
) {
1261 // The constant offset will have added at least +/-MinAbsVarIndex to it.
1262 APInt OffsetLo
= DecompGEP1
.Offset
- *MinAbsVarIndex
;
1263 APInt OffsetHi
= DecompGEP1
.Offset
+ *MinAbsVarIndex
;
1264 // We know that Offset <= OffsetLo || Offset >= OffsetHi
1265 if (OffsetLo
.isNegative() && (-OffsetLo
).uge(V1Size
.getValue()) &&
1266 OffsetHi
.isNonNegative() && OffsetHi
.uge(V2Size
.getValue()))
1267 return AliasResult::NoAlias
;
1270 if (constantOffsetHeuristic(DecompGEP1
, V1Size
, V2Size
, &AC
, DT
, AAQI
))
1271 return AliasResult::NoAlias
;
1273 // Statically, we can see that the base objects are the same, but the
1274 // pointers have dynamic offsets which we can't resolve. And none of our
1275 // little tricks above worked.
1276 return AliasResult::MayAlias
;
static AliasResult MergeAliasResults(AliasResult A, AliasResult B) {
  // If the results agree, take it.
  if (A == B)
    return A;
  // A mix of PartialAlias and MustAlias is PartialAlias.
  if ((A == AliasResult::PartialAlias && B == AliasResult::MustAlias) ||
      (B == AliasResult::PartialAlias && A == AliasResult::MustAlias))
    return AliasResult::PartialAlias;
  // Otherwise, we don't know anything.
  return AliasResult::MayAlias;
}
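// Behavior sketch (follows directly from the cases above): MustAlias merged
// with MustAlias stays MustAlias, PartialAlias merged with MustAlias becomes
// PartialAlias, and any disagreement involving NoAlias (e.g. NoAlias merged
// with MustAlias) degrades to MayAlias.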
1291 /// Provides a bunch of ad-hoc rules to disambiguate a Select instruction
1292 /// against another.
1294 BasicAAResult::aliasSelect(const SelectInst
*SI
, LocationSize SISize
,
1295 const Value
*V2
, LocationSize V2Size
,
1296 AAQueryInfo
&AAQI
) {
1297 // If the values are Selects with the same condition, we can do a more precise
1298 // check: just check for aliases between the values on corresponding arms.
1299 if (const SelectInst
*SI2
= dyn_cast
<SelectInst
>(V2
))
1300 if (isValueEqualInPotentialCycles(SI
->getCondition(), SI2
->getCondition(),
1303 AAQI
.AAR
.alias(MemoryLocation(SI
->getTrueValue(), SISize
),
1304 MemoryLocation(SI2
->getTrueValue(), V2Size
), AAQI
);
1305 if (Alias
== AliasResult::MayAlias
)
1306 return AliasResult::MayAlias
;
1307 AliasResult ThisAlias
=
1308 AAQI
.AAR
.alias(MemoryLocation(SI
->getFalseValue(), SISize
),
1309 MemoryLocation(SI2
->getFalseValue(), V2Size
), AAQI
);
1310 return MergeAliasResults(ThisAlias
, Alias
);
1313 // If both arms of the Select node NoAlias or MustAlias V2, then returns
1314 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1315 AliasResult Alias
= AAQI
.AAR
.alias(MemoryLocation(SI
->getTrueValue(), SISize
),
1316 MemoryLocation(V2
, V2Size
), AAQI
);
1317 if (Alias
== AliasResult::MayAlias
)
1318 return AliasResult::MayAlias
;
1320 AliasResult ThisAlias
=
1321 AAQI
.AAR
.alias(MemoryLocation(SI
->getFalseValue(), SISize
),
1322 MemoryLocation(V2
, V2Size
), AAQI
);
1323 return MergeAliasResults(ThisAlias
, Alias
);
1326 /// Provide a bunch of ad-hoc rules to disambiguate a PHI instruction against
1328 AliasResult
BasicAAResult::aliasPHI(const PHINode
*PN
, LocationSize PNSize
,
1329 const Value
*V2
, LocationSize V2Size
,
1330 AAQueryInfo
&AAQI
) {
1331 if (!PN
->getNumIncomingValues())
1332 return AliasResult::NoAlias
;
1333 // If the values are PHIs in the same block, we can do a more precise
1334 // as well as efficient check: just check for aliases between the values
1335 // on corresponding edges.
1336 if (const PHINode
*PN2
= dyn_cast
<PHINode
>(V2
))
1337 if (PN2
->getParent() == PN
->getParent()) {
1338 std::optional
<AliasResult
> Alias
;
1339 for (unsigned i
= 0, e
= PN
->getNumIncomingValues(); i
!= e
; ++i
) {
1340 AliasResult ThisAlias
= AAQI
.AAR
.alias(
1341 MemoryLocation(PN
->getIncomingValue(i
), PNSize
),
1343 PN2
->getIncomingValueForBlock(PN
->getIncomingBlock(i
)), V2Size
),
1346 *Alias
= MergeAliasResults(*Alias
, ThisAlias
);
1349 if (*Alias
== AliasResult::MayAlias
)
1355 SmallVector
<Value
*, 4> V1Srcs
;
1356 // If a phi operand recurses back to the phi, we can still determine NoAlias
1357 // if we don't alias the underlying objects of the other phi operands, as we
1358 // know that the recursive phi needs to be based on them in some way.
1359 bool isRecursive
= false;
1360 auto CheckForRecPhi
= [&](Value
*PV
) {
1361 if (!EnableRecPhiAnalysis
)
1363 if (getUnderlyingObject(PV
) == PN
) {
1370 SmallPtrSet
<Value
*, 4> UniqueSrc
;
1371 Value
*OnePhi
= nullptr;
1372 for (Value
*PV1
: PN
->incoming_values()) {
1373 // Skip the phi itself being the incoming value.
1377 if (isa
<PHINode
>(PV1
)) {
1378 if (OnePhi
&& OnePhi
!= PV1
) {
        // To control potential compile time explosion, we choose to be
        // conservative when we have more than one Phi input. It is important
1381 // that we handle the single phi case as that lets us handle LCSSA
1382 // phi nodes and (combined with the recursive phi handling) simple
1383 // pointer induction variable patterns.
1384 return AliasResult::MayAlias
;
1389 if (CheckForRecPhi(PV1
))
1392 if (UniqueSrc
.insert(PV1
).second
)
1393 V1Srcs
.push_back(PV1
);
1396 if (OnePhi
&& UniqueSrc
.size() > 1)
1397 // Out of an abundance of caution, allow only the trivial lcssa and
1398 // recursive phi cases.
1399 return AliasResult::MayAlias
;
1401 // If V1Srcs is empty then that means that the phi has no underlying non-phi
1402 // value. This should only be possible in blocks unreachable from the entry
1403 // block, but return MayAlias just in case.
1405 return AliasResult::MayAlias
;
1407 // If this PHI node is recursive, indicate that the pointer may be moved
1408 // across iterations. We can only prove NoAlias if different underlying
1409 // objects are involved.
1411 PNSize
= LocationSize::beforeOrAfterPointer();
1413 // In the recursive alias queries below, we may compare values from two
1414 // different loop iterations.
1415 SaveAndRestore
SavedMayBeCrossIteration(AAQI
.MayBeCrossIteration
, true);
1417 AliasResult Alias
= AAQI
.AAR
.alias(MemoryLocation(V1Srcs
[0], PNSize
),
1418 MemoryLocation(V2
, V2Size
), AAQI
);
1420 // Early exit if the check of the first PHI source against V2 is MayAlias.
1421 // Other results are not possible.
1422 if (Alias
== AliasResult::MayAlias
)
1423 return AliasResult::MayAlias
;
1424 // With recursive phis we cannot guarantee that MustAlias/PartialAlias will
1425 // remain valid to all elements and needs to conservatively return MayAlias.
1426 if (isRecursive
&& Alias
!= AliasResult::NoAlias
)
1427 return AliasResult::MayAlias
;
1429 // If all sources of the PHI node NoAlias or MustAlias V2, then returns
1430 // NoAlias / MustAlias. Otherwise, returns MayAlias.
1431 for (unsigned i
= 1, e
= V1Srcs
.size(); i
!= e
; ++i
) {
1432 Value
*V
= V1Srcs
[i
];
1434 AliasResult ThisAlias
= AAQI
.AAR
.alias(
1435 MemoryLocation(V
, PNSize
), MemoryLocation(V2
, V2Size
), AAQI
);
1436 Alias
= MergeAliasResults(ThisAlias
, Alias
);
1437 if (Alias
== AliasResult::MayAlias
)
1444 /// Provides a bunch of ad-hoc rules to disambiguate in common cases, such as
1445 /// array references.
1446 AliasResult
BasicAAResult::aliasCheck(const Value
*V1
, LocationSize V1Size
,
1447 const Value
*V2
, LocationSize V2Size
,
1449 const Instruction
*CtxI
) {
1450 // If either of the memory references is empty, it doesn't matter what the
1451 // pointer values are.
1452 if (V1Size
.isZero() || V2Size
.isZero())
1453 return AliasResult::NoAlias
;
1455 // Strip off any casts if they exist.
1456 V1
= V1
->stripPointerCastsForAliasAnalysis();
1457 V2
= V2
->stripPointerCastsForAliasAnalysis();
1459 // If V1 or V2 is undef, the result is NoAlias because we can always pick a
1460 // value for undef that aliases nothing in the program.
1461 if (isa
<UndefValue
>(V1
) || isa
<UndefValue
>(V2
))
1462 return AliasResult::NoAlias
;
1464 // Are we checking for alias of the same value?
1465 // Because we look 'through' phi nodes, we could look at "Value" pointers from
1466 // different iterations. We must therefore make sure that this is not the
1467 // case. The function isValueEqualInPotentialCycles ensures that this cannot
1468 // happen by looking at the visited phi nodes and making sure they cannot
1470 if (isValueEqualInPotentialCycles(V1
, V2
, AAQI
))
1471 return AliasResult::MustAlias
;
1473 if (!V1
->getType()->isPointerTy() || !V2
->getType()->isPointerTy())
1474 return AliasResult::NoAlias
; // Scalars cannot alias each other
1476 // Figure out what objects these things are pointing to if we can.
1477 const Value
*O1
= getUnderlyingObject(V1
, MaxLookupSearchDepth
);
1478 const Value
*O2
= getUnderlyingObject(V2
, MaxLookupSearchDepth
);
1480 // Null values in the default address space don't point to any object, so they
1481 // don't alias any other pointer.
1482 if (const ConstantPointerNull
*CPN
= dyn_cast
<ConstantPointerNull
>(O1
))
1483 if (!NullPointerIsDefined(&F
, CPN
->getType()->getAddressSpace()))
1484 return AliasResult::NoAlias
;
1485 if (const ConstantPointerNull
*CPN
= dyn_cast
<ConstantPointerNull
>(O2
))
1486 if (!NullPointerIsDefined(&F
, CPN
->getType()->getAddressSpace()))
1487 return AliasResult::NoAlias
;
1490 // If V1/V2 point to two different objects, we know that we have no alias.
1491 if (isIdentifiedObject(O1
) && isIdentifiedObject(O2
))
1492 return AliasResult::NoAlias
;
1494 // Constant pointers can't alias with non-const isIdentifiedObject objects.
1495 if ((isa
<Constant
>(O1
) && isIdentifiedObject(O2
) && !isa
<Constant
>(O2
)) ||
1496 (isa
<Constant
>(O2
) && isIdentifiedObject(O1
) && !isa
<Constant
>(O1
)))
1497 return AliasResult::NoAlias
;
  // Function arguments can't alias with things that are known to be
  // unambiguously identified at the function level.
1501 if ((isa
<Argument
>(O1
) && isIdentifiedFunctionLocal(O2
)) ||
1502 (isa
<Argument
>(O2
) && isIdentifiedFunctionLocal(O1
)))
1503 return AliasResult::NoAlias
;
1505 // If one pointer is the result of a call/invoke or load and the other is a
1506 // non-escaping local object within the same function, then we know the
1507 // object couldn't escape to a point where the call could return it.
1509 // Note that if the pointers are in different functions, there are a
  // variety of complications. A call with a nocapture argument may still
  // temporarily store the nocapture argument's value in a temporary memory
1512 // location if that memory location doesn't escape. Or it may pass a
1513 // nocapture value to other functions as long as they don't capture it.
1514 if (isEscapeSource(O1
) &&
1515 AAQI
.CI
->isNotCapturedBeforeOrAt(O2
, cast
<Instruction
>(O1
)))
1516 return AliasResult::NoAlias
;
1517 if (isEscapeSource(O2
) &&
1518 AAQI
.CI
->isNotCapturedBeforeOrAt(O1
, cast
<Instruction
>(O2
)))
1519 return AliasResult::NoAlias
;
1522 // If the size of one access is larger than the entire object on the other
1523 // side, then we know such behavior is undefined and can assume no alias.
1524 bool NullIsValidLocation
= NullPointerIsDefined(&F
);
1525 if ((isObjectSmallerThan(
1526 O2
, getMinimalExtentFrom(*V1
, V1Size
, DL
, NullIsValidLocation
), DL
,
1527 TLI
, NullIsValidLocation
)) ||
1528 (isObjectSmallerThan(
1529 O1
, getMinimalExtentFrom(*V2
, V2Size
, DL
, NullIsValidLocation
), DL
,
1530 TLI
, NullIsValidLocation
)))
1531 return AliasResult::NoAlias
;
1533 if (CtxI
&& EnableSeparateStorageAnalysis
) {
1534 for (auto &AssumeVH
: AC
.assumptions()) {
1538 AssumeInst
*Assume
= cast
<AssumeInst
>(AssumeVH
);
1540 for (unsigned Idx
= 0; Idx
< Assume
->getNumOperandBundles(); Idx
++) {
1541 OperandBundleUse OBU
= Assume
->getOperandBundleAt(Idx
);
1542 if (OBU
.getTagName() == "separate_storage") {
1543 assert(OBU
.Inputs
.size() == 2);
1544 const Value
*Hint1
= OBU
.Inputs
[0].get();
1545 const Value
*Hint2
= OBU
.Inputs
[1].get();
1546 // This is often a no-op; instcombine rewrites this for us. No-op
1547 // getUnderlyingObject calls are fast, though.
1548 const Value
*HintO1
= getUnderlyingObject(Hint1
);
1549 const Value
*HintO2
= getUnderlyingObject(Hint2
);
1551 if (((O1
== HintO1
&& O2
== HintO2
) ||
1552 (O1
== HintO2
&& O2
== HintO1
)) &&
1553 isValidAssumeForContext(Assume
, CtxI
, DT
))
1554 return AliasResult::NoAlias
;
1560 // If one the accesses may be before the accessed pointer, canonicalize this
1561 // by using unknown after-pointer sizes for both accesses. This is
1562 // equivalent, because regardless of which pointer is lower, one of them
  // will always come after the other, as long as the underlying objects aren't
1564 // disjoint. We do this so that the rest of BasicAA does not have to deal
1565 // with accesses before the base pointer, and to improve cache utilization by
1566 // merging equivalent states.
1567 if (V1Size
.mayBeBeforePointer() || V2Size
.mayBeBeforePointer()) {
1568 V1Size
= LocationSize::afterPointer();
1569 V2Size
= LocationSize::afterPointer();
  // FIXME: If this depth limit is hit, then we may cache sub-optimal results
  // for recursive queries. For this reason, this limit is chosen to be large
  // enough to be very rarely hit, while still being small enough to avoid
  // stack overflows.
  if (AAQI.Depth >= 512)
    return AliasResult::MayAlias;
  // Check the cache before climbing up use-def chains. This also terminates
  // otherwise infinitely recursive queries. Include MayBeCrossIteration in the
  // cache key, because some cases where MayBeCrossIteration==false returns
  // MustAlias or NoAlias may become MayAlias under MayBeCrossIteration==true.
  AAQueryInfo::LocPair Locs({V1, V1Size, AAQI.MayBeCrossIteration},
                            {V2, V2Size, AAQI.MayBeCrossIteration});
  const bool Swapped = V1 > V2;
  if (Swapped)
    std::swap(Locs.first, Locs.second);
  const auto &Pair = AAQI.AliasCache.try_emplace(
      Locs, AAQueryInfo::CacheEntry{AliasResult::NoAlias, 0});
  if (!Pair.second) {
    auto &Entry = Pair.first->second;
    if (!Entry.isDefinitive()) {
      // Remember that we used an assumption.
      ++Entry.NumAssumptionUses;
      ++AAQI.NumAssumptionUses;
    }
    // Cache contains sorted {V1,V2} pairs but we should return original order.
    auto Result = Entry.Result;
    Result.swap(Swapped);
    return Result;
  }
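  // Note (illustrative, not from the original source): a non-definitive entry
  // is the optimistic NoAlias placeholder seeded by try_emplace above.
  // Recursive queries (e.g. through a cyclic phi) that hit it return NoAlias
  // as an assumption, and NumAssumptionUses records how often that happened,
  // so the root query below can detect whether the assumption was disproven.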
  int OrigNumAssumptionUses = AAQI.NumAssumptionUses;
  unsigned OrigNumAssumptionBasedResults = AAQI.AssumptionBasedResults.size();
  AliasResult Result =
      aliasCheckRecursive(V1, V1Size, V2, V2Size, AAQI, O1, O2);
  auto It = AAQI.AliasCache.find(Locs);
  assert(It != AAQI.AliasCache.end() && "Must be in cache");
  auto &Entry = It->second;
  // Check whether a NoAlias assumption has been used, but disproven.
  bool AssumptionDisproven =
      Entry.NumAssumptionUses > 0 && Result != AliasResult::NoAlias;
  if (AssumptionDisproven)
    Result = AliasResult::MayAlias;
  // This is a definitive result now, when considered as a root query.
  AAQI.NumAssumptionUses -= Entry.NumAssumptionUses;
  Entry.Result = Result;
  // Cache contains sorted {V1,V2} pairs.
  Entry.Result.swap(Swapped);
  Entry.NumAssumptionUses = -1;
  // If the assumption has been disproven, remove any results that may have
  // been based on this assumption. Do this after the Entry updates above to
  // avoid iterator invalidation.
  if (AssumptionDisproven)
    while (AAQI.AssumptionBasedResults.size() > OrigNumAssumptionBasedResults)
      AAQI.AliasCache.erase(AAQI.AssumptionBasedResults.pop_back_val());
  // The result may still be based on assumptions higher up in the chain.
  // Remember it, so it can be purged from the cache later.
  if (OrigNumAssumptionUses != AAQI.NumAssumptionUses &&
      Result != AliasResult::MayAlias)
    AAQI.AssumptionBasedResults.push_back(Locs);

  return Result;
}
AliasResult BasicAAResult::aliasCheckRecursive(
    const Value *V1, LocationSize V1Size,
    const Value *V2, LocationSize V2Size,
    AAQueryInfo &AAQI, const Value *O1, const Value *O2) {
  if (const GEPOperator *GV1 = dyn_cast<GEPOperator>(V1)) {
    AliasResult Result = aliasGEP(GV1, V1Size, V2, V2Size, O1, O2, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const GEPOperator *GV2 = dyn_cast<GEPOperator>(V2)) {
    AliasResult Result = aliasGEP(GV2, V2Size, V1, V1Size, O2, O1, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }
  if (const PHINode *PN = dyn_cast<PHINode>(V1)) {
    AliasResult Result = aliasPHI(PN, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const PHINode *PN = dyn_cast<PHINode>(V2)) {
    AliasResult Result = aliasPHI(PN, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }
  if (const SelectInst *S1 = dyn_cast<SelectInst>(V1)) {
    AliasResult Result = aliasSelect(S1, V1Size, V2, V2Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  } else if (const SelectInst *S2 = dyn_cast<SelectInst>(V2)) {
    AliasResult Result = aliasSelect(S2, V2Size, V1, V1Size, AAQI);
    if (Result != AliasResult::MayAlias)
      return Result;
  }
  // If both pointers are pointing into the same object and one of them
  // accesses the entire object, then the accesses must overlap in some way.
  if (O1 == O2) {
    bool NullIsValidLocation = NullPointerIsDefined(&F);
    if (V1Size.isPrecise() && V2Size.isPrecise() &&
        (isObjectSize(O1, V1Size.getValue(), DL, TLI, NullIsValidLocation) ||
         isObjectSize(O2, V2Size.getValue(), DL, TLI, NullIsValidLocation)))
      return AliasResult::PartialAlias;
  }

  return AliasResult::MayAlias;
}
/// Check whether two Values can be considered equivalent.
///
/// If the values may come from different cycle iterations, this will also
/// check that the values are not part of a cycle. We have to do this because
/// we are looking through phi nodes, that is we say
/// noalias(V, phi(VA, VB)) if noalias(V, VA) and noalias(V, VB).
bool BasicAAResult::isValueEqualInPotentialCycles(const Value *V,
                                                  const Value *V2,
                                                  const AAQueryInfo &AAQI) {
  if (V != V2)
    return false;

  if (!AAQI.MayBeCrossIteration)
    return true;

  // Non-instructions and instructions in the entry block cannot be part of
  // a cycle.
  const Instruction *Inst = dyn_cast<Instruction>(V);
  if (!Inst || Inst->getParent()->isEntryBlock())
    return true;
  // Check whether the instruction is part of a cycle, by checking whether the
  // block can (non-trivially) reach itself.
  BasicBlock *BB = const_cast<BasicBlock *>(Inst->getParent());
  SmallVector<BasicBlock *> Succs(successors(BB));
  return !Succs.empty() &&
         !isPotentiallyReachableFromMany(Succs, BB, nullptr, DT);
}
/// Computes the symbolic difference between two de-composed GEPs.
void BasicAAResult::subtractDecomposedGEPs(DecomposedGEP &DestGEP,
                                           const DecomposedGEP &SrcGEP,
                                           const AAQueryInfo &AAQI) {
  DestGEP.Offset -= SrcGEP.Offset;
  for (const VariableGEPIndex &Src : SrcGEP.VarIndices) {
    // Find V in Dest. This is N^2, but pointer indices almost never have more
    // than a few variable indexes.
    bool Found = false;
    for (auto I : enumerate(DestGEP.VarIndices)) {
      VariableGEPIndex &Dest = I.value();
      if (!isValueEqualInPotentialCycles(Dest.Val.V, Src.Val.V, AAQI) ||
          !Dest.Val.hasSameCastsAs(Src.Val))
        continue;
      // Normalize IsNegated if we're going to lose the NSW flag anyway.
      if (Dest.IsNegated) {
        Dest.Scale = -Dest.Scale;
        Dest.IsNegated = false;
        Dest.IsNSW = false;
      }
      // If we found it, subtract off Scale V's from the entry in Dest. If it
      // goes to zero, remove the entry.
      if (Dest.Scale != Src.Scale) {
        Dest.Scale -= Src.Scale;
        Dest.IsNSW = false;
      } else {
        DestGEP.VarIndices.erase(DestGEP.VarIndices.begin() + I.index());
      }
      Found = true;
      break;
    }
    // If we didn't consume this entry, add it to the end of the Dest list.
    if (!Found) {
      VariableGEPIndex Entry = {Src.Val, Src.Scale, Src.CxtI, Src.IsNSW,
                                /* IsNegated */ true};
      DestGEP.VarIndices.push_back(Entry);
    }
  }
}
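// Illustrative example (not from the original source): with hypothetical
// decompositions Dest = {Offset 0, [{%x, +4}]} and Src = {Offset 0,
// [{%x, +4}]}, the matching entries cancel and the difference is empty.
// If Src instead held [{%y, +4}], no Dest entry matches, so {%y, +4} is
// appended with IsNegated set.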
bool BasicAAResult::constantOffsetHeuristic(const DecomposedGEP &GEP,
                                            LocationSize MaybeV1Size,
                                            LocationSize MaybeV2Size,
                                            AssumptionCache *AC,
                                            DominatorTree *DT,
                                            const AAQueryInfo &AAQI) {
  if (GEP.VarIndices.size() != 2 || !MaybeV1Size.hasValue() ||
      !MaybeV2Size.hasValue())
    return false;

  const uint64_t V1Size = MaybeV1Size.getValue();
  const uint64_t V2Size = MaybeV2Size.getValue();
  const VariableGEPIndex &Var0 = GEP.VarIndices[0], &Var1 = GEP.VarIndices[1];

  if (Var0.Val.TruncBits != 0 || !Var0.Val.hasSameCastsAs(Var1.Val) ||
      !Var0.hasNegatedScaleOf(Var1) ||
      Var0.Val.V->getType() != Var1.Val.V->getType())
    return false;
  // We'll strip off the Extensions of Var0 and Var1 and do another round
  // of GetLinearExpression decomposition. In the example above, if Var0
  // is zext(%x + 1) we should get V1 == %x and V1Offset == 1.

  LinearExpression E0 =
      GetLinearExpression(CastedValue(Var0.Val.V), DL, 0, AC, DT);
  LinearExpression E1 =
      GetLinearExpression(CastedValue(Var1.Val.V), DL, 0, AC, DT);
  if (E0.Scale != E1.Scale || !E0.Val.hasSameCastsAs(E1.Val) ||
      !isValueEqualInPotentialCycles(E0.Val.V, E1.Val.V, AAQI))
    return false;
  // We have a hit - Var0 and Var1 only differ by a constant offset!

  // If we've been sext'ed then zext'd, the maximum difference between Var0 and
  // Var1 is possible to calculate, but we're just interested in the absolute
  // minimum difference between the two. The minimum distance may occur due to
  // wrapping; consider "add i3 %i, 5": if %i == 7 then 7 + 5 mod 8 == 4, and so
  // the minimum distance between %i and %i + 5 is 3.
  APInt MinDiff = E0.Offset - E1.Offset, Wrapped = -MinDiff;
  MinDiff = APIntOps::umin(MinDiff, Wrapped);
  APInt MinDiffBytes =
      MinDiff.zextOrTrunc(Var0.Scale.getBitWidth()) * Var0.Scale.abs();
  // We can't definitely say whether GEP1 is before or after V2 due to wrapping
  // arithmetic (i.e. for some values of GEP1 and V2 GEP1 < V2, and for other
  // values GEP1 > V2). We'll therefore only declare NoAlias if both V1Size and
  // V2Size can fit in the MinDiffBytes gap.
  return MinDiffBytes.uge(V1Size + GEP.Offset.abs()) &&
         MinDiffBytes.uge(V2Size + GEP.Offset.abs());
}
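// Illustrative walk-through (not from the original source): for two 4-byte
// accesses to a[%x] and a[%x + 1] with i32 elements, the difference GEP can
// carry VarIndices {zext(%x), +4} and {zext(%x + 1), -4}. Stripping the
// extensions gives E0 = %x + 0 and E1 = %x + 1; E0.Offset - E1.Offset wraps to
// all-ones, its negation is 1, and the unsigned minimum is 1, so
// MinDiffBytes = 1 * 4 = 4. With V1Size = V2Size = 4 and GEP.Offset == 0,
// both uge() checks hold and the heuristic reports NoAlias.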
//===----------------------------------------------------------------------===//
// BasicAliasAnalysis Pass
//===----------------------------------------------------------------------===//

AnalysisKey BasicAA::Key;
BasicAAResult BasicAA::run(Function &F, FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &AC = AM.getResult<AssumptionAnalysis>(F);
  auto *DT = &AM.getResult<DominatorTreeAnalysis>(F);
  return BasicAAResult(F.getParent()->getDataLayout(), F, TLI, AC, DT);
}
BasicAAWrapperPass::BasicAAWrapperPass() : FunctionPass(ID) {
  initializeBasicAAWrapperPassPass(*PassRegistry::getPassRegistry());
}

char BasicAAWrapperPass::ID = 0;

void BasicAAWrapperPass::anchor() {}
INITIALIZE_PASS_BEGIN(BasicAAWrapperPass, "basic-aa",
                      "Basic Alias Analysis (stateless AA impl)", true, true)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(BasicAAWrapperPass, "basic-aa",
                    "Basic Alias Analysis (stateless AA impl)", true, true)
FunctionPass *llvm::createBasicAAWrapperPass() {
  return new BasicAAWrapperPass();
}
bool BasicAAWrapperPass::runOnFunction(Function &F) {
  auto &ACT = getAnalysis<AssumptionCacheTracker>();
  auto &TLIWP = getAnalysis<TargetLibraryInfoWrapperPass>();
  auto &DTWP = getAnalysis<DominatorTreeWrapperPass>();

  Result.reset(new BasicAAResult(F.getParent()->getDataLayout(), F,
                                 TLIWP.getTLI(F), ACT.getAssumptionCache(F),
                                 &DTWP.getDomTree()));

  return false;
}
void BasicAAWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.setPreservesAll();
  AU.addRequiredTransitive<AssumptionCacheTracker>();
  AU.addRequiredTransitive<DominatorTreeWrapperPass>();
  AU.addRequiredTransitive<TargetLibraryInfoWrapperPass>();
}